TensorFlow 2 + Dataset + multi-loss: matching a multi-output task to multiple losses

We previously covered how to use a Dataset with a model's fit().

Reference: https://blog.csdn.net/qq_24677259/article/details/108166601

Now let's continue and get familiar with multi-output, multi-loss setups: multiple outputs, each paired with its own loss function.

Keras functional models are the main tool for multi-input, multi-output tasks. The outputs are matched to the list of loss functions by position, so when building the dataset each element must likewise be a tuple of two parts, the inputs and the outputs:

```python
dataset = tf.data.Dataset.from_tensor_slices((x_train, (y_train, y_train))).batch(32)
```
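Keras also accepts dicts keyed by output-layer name instead of positional tuples, which is less error-prone once a model has more than two heads. A minimal sketch, assuming the output layers are named `out1` and `out2` (the names are illustrative):

```python
# Labels keyed by output-layer name instead of by position
dataset = tf.data.Dataset.from_tensor_slices(
    (x_train, {'out1': y_train, 'out2': y_train})).batch(32)

# The loss dict uses the same keys, so the pairing is explicit
model.compile(optimizer='adam',
              loss={'out1': 'categorical_crossentropy', 'out2': 'mse'})
```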

A complete example:

```python
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Input
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import mean_squared_error, categorical_crossentropy
import numpy as np
import tensorflow as tf

# Let GPU memory grow on demand instead of grabbing it all up front
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
```
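The original example breaks off here. What follows is a minimal sketch of how it plausibly continues, reusing the imports above; the layer sizes, output names, and loss weights are illustrative, and both heads train against the same one-hot labels to match the `(y_train, y_train)` tuple from before:

```python
# Load CIFAR-10 and one-hot encode the labels
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype(np.float32) / 255.0
y_train = to_categorical(y_train, 10)

# Two-output functional model: a shared conv trunk with two heads
inputs = Input(shape=(32, 32, 3))
x = Conv2D(32, 3, activation='relu')(inputs)
x = MaxPool2D()(x)
x = Conv2D(64, 3, activation='relu')(x)
x = MaxPool2D()(x)
x = Flatten()(x)
out1 = Dense(10, activation='softmax', name='out1')(x)
out2 = Dense(10, activation='softmax', name='out2')(x)
model = Model(inputs=inputs, outputs=[out1, out2])

# Losses match the outputs by position: out1 -> crossentropy, out2 -> mse
model.compile(optimizer=Adam(1e-3),
              loss=[categorical_crossentropy, mean_squared_error],
              loss_weights=[1.0, 0.5])

# The dataset yields (inputs, (labels_for_out1, labels_for_out2)) pairs
dataset = tf.data.Dataset.from_tensor_slices((x_train, (y_train, y_train))).batch(32)
model.fit(dataset, epochs=2)
```

With `loss_weights`, the quantity fit() minimizes is the weighted sum of the per-output losses.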
Below is PyTorch code for a multi-task model with hard parameter sharing, built on a ResNet-style backbone:

```python
import torch
import torch.nn as nn
import torch.optim as optim

# Shared convolutional backbone
class ResNet(nn.Module):
    def __init__(self, num_classes=6):
        super(ResNet, self).__init__()
        self.resnet = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 2048, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def features(self, x):
        # Pooled 2048-d feature vector, consumed by the task heads below
        x = self.resnet(x)
        x = self.avgpool(x)
        return torch.flatten(x, 1)

    def forward(self, x):
        return self.fc(self.features(x))

# Hard parameter sharing: one trunk, one linear head per task
class MultiTaskModel(nn.Module):
    def __init__(self, num_classes_list=[10] * 6):
        super(MultiTaskModel, self).__init__()
        self.resnet = ResNet()
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 256)
        self.relu = nn.ReLU(inplace=True)
        self.fc4_list = nn.ModuleList(
            [nn.Linear(256, n) for n in num_classes_list])

    def forward(self, x_list):
        # One input batch per task; the trunk is shared, the heads are not
        outputs = []
        for x, head in zip(x_list, self.fc4_list):
            x = self.resnet.features(x)  # backbone features, not its own fc output
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            x = self.relu(self.fc3(x))
            outputs.append(head(x))
        return outputs

# Training loop: the total loss is the sum of the per-task losses
def train(model, dataloader_list, criterion_list, optimizer, epochs):
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(zip(*dataloader_list)):
            # data holds one (inputs, labels) batch per task dataloader
            inputs_list = [inputs.to(device) for inputs, _ in data]
            labels_list = [labels.to(device) for _, labels in data]
            optimizer.zero_grad()
            outputs_list = model(inputs_list)
            loss = sum(criterion_list[j](out, labels_list[j])
                       for j, out in enumerate(outputs_list))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('Epoch {}, loss: {:.4f}'.format(epoch + 1, running_loss / (i + 1)))

# Evaluation loop: per-task average loss and accuracy
def evaluate(model, dataloader_list, criterion_list):
    model.eval()
    num_tasks = len(dataloader_list)
    total_loss = [0.0] * num_tasks
    total_correct = [0] * num_tasks
    total_samples = [0] * num_tasks
    num_batches = 0
    with torch.no_grad():
        for data in zip(*dataloader_list):
            inputs_list = [inputs.to(device) for inputs, _ in data]
            labels_list = [labels.to(device) for _, labels in data]
            outputs_list = model(inputs_list)
            num_batches += 1
            for j in range(num_tasks):
                total_loss[j] += criterion_list[j](outputs_list[j], labels_list[j]).item()
                preds = torch.argmax(outputs_list[j], dim=1)
                total_correct[j] += torch.sum(preds == labels_list[j]).item()
                total_samples[j] += len(labels_list[j])
    avg_loss = [total_loss[j] / num_batches for j in range(num_tasks)]
    avg_acc = [total_correct[j] / total_samples[j] for j in range(num_tasks)]
    print('Loss: {}, Accuracy: {}'.format(avg_loss, avg_acc))

# Data loading and preprocessing
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from torch.utils.data import DataLoader

transform = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
cifar10_train = CIFAR10(root='./data', train=True, transform=transform, download=True)
cifar10_test = CIFAR10(root='./data', train=False, transform=transform, download=True)
# Every task reuses CIFAR-10 here, so all six loaders wrap the same data
train_loaders = [DataLoader(cifar10_train, batch_size=128, shuffle=True) for _ in range(6)]
test_loaders = [DataLoader(cifar10_test, batch_size=128, shuffle=False) for _ in range(6)]

# Training hyperparameters
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_classes_list = [10] * 6  # CIFAR-10 labels run 0-9, so each head needs 10 classes
model = MultiTaskModel(num_classes_list).to(device)
criterion_list = [nn.CrossEntropyLoss() for _ in range(len(num_classes_list))]
optimizer = optim.Adam(model.parameters(), lr=0.001)
epochs = 10

# Train the model
train(model, train_loaders, criterion_list, optimizer, epochs)

# Evaluate the model
evaluate(model, test_loaders, criterion_list)
```
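This is the classic hard-parameter-sharing setup: the convolutional trunk is updated by every task's gradients, which acts as a regularizer across tasks, while each task keeps only a cheap linear head of its own. One detail worth knowing: `zip(*dataloader_list)` stops at the shortest loader, which is harmless here because all six loaders wrap the same dataset.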
