# Python: binary-classification pretraining on DICOM images with a DenseNet-121 network

import os
import torch
import copy
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from pydicom import dcmread
from PIL import Image
import numpy as np

# 数据集类
class DicomDataset(Dataset):
    """Binary-classification dataset over DICOM images.

    The CSV's first column holds the DICOM filename (relative to
    ``dicom_dir``); the second column holds the integer class label.
    """

    def __init__(self, csv_file, dicom_dir, transform=None):
        self.dataframe = pd.read_csv(csv_file)
        self.dicom_dir = dicom_dir
        self.transform = transform

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        file_path = os.path.join(self.dicom_dir, self.dataframe.iloc[idx, 0])
        label = self.dataframe.iloc[idx, 1]
        dicom_image = dcmread(file_path)
        pixels = dicom_image.pixel_array
        image = pixels.astype(np.float32)  # float for normalisation

        # Normalise into [0, 255].  BUG FIX: np.iinfo only accepts integer
        # dtypes and raised ValueError for float pixel data (which pydicom
        # can produce, e.g. after rescale).  Integer input keeps the
        # original behaviour (scale by the dtype's theoretical max); float
        # input falls back to the actual maximum, guarding against an
        # all-zero image to avoid division by zero.
        if np.issubdtype(pixels.dtype, np.integer):
            max_val = np.iinfo(pixels.dtype).max
        else:
            max_val = float(image.max()) or 1.0
        image = (image / max_val) * 255.0

        # Quantise to uint8 so ToPILImage treats it as 8-bit grayscale.
        image = image.astype(np.uint8)

        # Apply preprocessing/augmentation if configured.
        if self.transform:
            image = self.transform(image)

        return image, label

# Preprocessing pipeline: PIL conversion, resize to DenseNet-121's
# expected 224x224 input, tensor conversion, then grayscale -> 3 channels.
_pipeline = [
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),                    # DenseNet-121 input size
    transforms.ToTensor(),
    transforms.Lambda(lambda t: t.repeat(3, 1, 1)),   # copy the gray channel to 3 channels
]
transform = transforms.Compose(_pipeline)

# Load the dataset and split 80/20 into train/test (no separate
# validation set in this first revision).
dataset = DicomDataset(csv_file='/data2/gechaoyang/feiyan_by_zr/filenamewithlabel(number).csv', dicom_dir='/data2/gechaoyang/feiyan_by_zr/DCM_AP_files_last_version', transform=transform)
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# Batch loaders; only the training loader shuffles.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Load an ImageNet-pretrained DenseNet-121 and swap in a 2-class head.
model = models.densenet121(pretrained=True)
num_ftrs = model.classifier.in_features
model.classifier = torch.nn.Linear(num_ftrs, 2)  # replace the final fully connected layer

# Loss and optimizer.  Only the new classifier head is optimised: the
# pretrained backbone's gradients are computed but never applied.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=0.0001)



# Train the model
def train_model(model, train_loader, criterion, optimizer, num_epochs=50):
    """Run a plain training loop and print the mean loss per epoch.

    Args:
        model: network to optimise (modified in place).
        train_loader: yields (images, labels) batches.
        criterion: loss function, e.g. CrossEntropyLoss.
        optimizer: optimiser over the model's trainable parameters.
        num_epochs: number of full passes over ``train_loader``.
    """
    model.train()  # enable dropout / batch-norm training behaviour
    for epoch in range(num_epochs):
        running_loss = 0.0
        for images, labels in train_loader:
            # BUG FIX: the original did `labels[1] if isinstance(labels,
            # tuple)`, which would have picked the SECOND label of the
            # batch, not "the label"; the default collate already yields
            # a label tensor.  Just guarantee the int64 dtype that
            # CrossEntropyLoss expects.
            labels = labels.long()

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f'Epoch {epoch+1}, Loss: {running_loss/len(train_loader)}')
# 测试模型
def test_model(model, test_loader):
    model.eval()  # 设置为评估模式
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f'Accuracy: {100 * correct / total}%')

# Train, then report test accuracy.
train_model(model, train_loader, criterion, optimizer)
test_model(model, test_loader)

# Improved code after adding a validation set:

#8月30日过拟合改进
import os
import torch
import copy
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from pydicom import dcmread
from PIL import Image
import numpy as np

# Dataset class: maps CSV rows (filename, label) to DICOM image tensors.
class DicomDataset(Dataset):
    """Loads DICOM images named in a CSV and returns (image, label) pairs."""

    def __init__(self, csv_file, dicom_dir, transform=None):
        self.dataframe = pd.read_csv(csv_file)
        self.dicom_dir = dicom_dir
        self.transform = transform

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        row = self.dataframe.iloc[idx]
        label = row.iloc[1]
        dicom = dcmread(os.path.join(self.dicom_dir, row.iloc[0]))

        # Scale raw pixel values into [0, 255] using the dtype's full
        # range, then quantise to uint8 so ToPILImage treats the array
        # as an 8-bit grayscale image.
        raw = dicom.pixel_array
        scale = np.iinfo(raw.dtype).max
        image = (raw.astype(np.float32) / scale * 255.0).astype(np.uint8)

        if self.transform:
            return self.transform(image), label
        return image, label

# Preprocessing: augmentation + normalisation on the 1-channel image,
# then the grayscale channel is copied to 3 channels for DenseNet-121.
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),  # DenseNet-121 expected input size
    transforms.RandomHorizontalFlip(),  # augmentation: random horizontal flip
    transforms.RandomRotation(10),      # augmentation: random rotation up to 10 degrees
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485], std=[0.229]),  # NOTE(review): single-channel stats applied BEFORE the 3-channel repeat; per-channel ImageNet stats after the repeat would be the usual order -- confirm intent
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),  # copy the gray channel to 3 channels
])

# Load the dataset and split 80/20 into train/test.  The test split also
# serves as the validation set for early stopping below.
dataset = DicomDataset(csv_file='/data2/gechaoyang/feiyan_by_zr/filenamewithlabel(number).csv', dicom_dir='/data2/gechaoyang/feiyan_by_zr/DCM_AP_files_last_version', transform=transform)
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# Batch loaders; only the training loader shuffles.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Custom DenseNet-121 model
import torch.nn as nn

class DenseNet121Custom(nn.Module):
    """DenseNet-121 backbone with a dropout-regularised 2-class head."""

    def __init__(self):
        super(DenseNet121Custom, self).__init__()
        # pretrained=False: weights start random (or are restored from
        # 'best_model_weights.pth' later in the script).
        self.densenet121 = models.densenet121(pretrained=False)
        num_ftrs = self.densenet121.classifier.in_features
        # Two-layer head; Dropout(0.5) is this revision's overfitting fix.
        self.densenet121.classifier = nn.Sequential(
            nn.Linear(num_ftrs, 256),
            nn.ReLU(),
            nn.Dropout(0.5),  # dropout layer against overfitting
            nn.Linear(256, 2)
        )

    def forward(self, x):
        # Delegate entirely to the wrapped backbone.
        return self.densenet121(x)

# Instantiate the model
model = DenseNet121Custom()

# Loss function and optimizer.
criterion = torch.nn.CrossEntropyLoss()
# BUG FIX: DenseNet121Custom has no top-level `.classifier` attribute
# (the head lives at model.densenet121.classifier), so the original
# `model.classifier.parameters()` raised AttributeError.  Optimise the
# replacement head only, with weight decay as regularisation.
optimizer = torch.optim.Adam(model.densenet121.classifier.parameters(), lr=0.01, weight_decay=0.001)

# Learning-rate scheduler: decay lr by 10x every 30 epochs.
# NOTE(review): scheduler.step() is never called in the training loop,
# so this currently has no effect -- confirm whether it should be wired in.
scheduler = StepLR(optimizer, step_size=30, gamma=0.1)

# Resume from previously saved best weights when available.
if os.path.isfile('best_model_weights.pth'):
    print('Loading saved model weights...')
    model.load_state_dict(torch.load('best_model_weights.pth'))
else:
    print('No saved model weights found. Starting from scratch.')

# Train with early stopping on validation loss.
def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs=300):
    """Train ``model``, validating each epoch; save the best weights and
    stop after 10 consecutive epochs without validation improvement.

    Args:
        model: network to optimise (modified in place).
        train_loader: training batches of (images, labels).
        test_loader: validation batches, passed to validate_model().
        criterion: loss function.
        optimizer: optimiser over the trainable parameters.
        num_epochs: maximum number of epochs.
    """
    best_val_loss = float('inf')
    patience = 0
    early_stopping_rounds = 10
    for epoch in range(num_epochs):
        # BUG FIX: validate_model() switches the model to eval mode, so
        # train mode must be re-enabled every epoch.  The original called
        # model.train() once before the loop, leaving dropout disabled
        # from epoch 2 onward.
        model.train()
        running_loss = 0.0
        for images, labels in train_loader:
            labels = labels.type(torch.LongTensor)  # CrossEntropyLoss expects int64 targets
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        avg_train_loss = running_loss / len(train_loader)
        print(f'Epoch {epoch+1}/{num_epochs}, Train Loss: {avg_train_loss:.4f}')

        # Validation pass (leaves the model in eval mode; see above).
        val_loss = validate_model(model, test_loader, criterion)

        # Early-stopping bookkeeping: checkpoint on improvement, count
        # consecutive stale epochs otherwise.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            patience = 0
            torch.save(model.state_dict(), 'best_model_weights.pth')
            print('Best model weights saved.')
        else:
            patience += 1
            if patience >= early_stopping_rounds:
                print('Early stopping!')
                break

# Compute the mean per-batch loss on the validation loader.
def validate_model(model, test_loader, criterion):
    """Return the average batch loss of ``model`` over ``test_loader``."""
    model.eval()  # inference mode for a deterministic loss
    total = 0.0
    with torch.no_grad():
        for batch_images, batch_labels in test_loader:
            total += criterion(model(batch_images), batch_labels).item()
    avg_val_loss = total / len(test_loader)
    print(f'Validation Loss: {avg_val_loss:.4f}')
    return avg_val_loss

# 测试模型
def test_model(model, test_loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    print(f'Accuracy: {accuracy:.2f}%')

# Train with early stopping, then report test accuracy.
train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs=300)
test_model(model, test_loader)
#9月2日9:42分改进
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import torch
import copy
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from pydicom import dcmread
from PIL import Image
import numpy as np



# Dataset class: reads (filename, label) rows from the CSV and loads the
# corresponding DICOM file from dicom_dir.
class DicomDataset(Dataset):
    """Loads DICOM images named in a CSV and returns (image, label) pairs."""

    def __init__(self, csv_file, dicom_dir, transform=None):
        self.dataframe = pd.read_csv(csv_file)  # col 0: filename, col 1: integer label
        self.dicom_dir = dicom_dir
        self.transform = transform

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        file_path = os.path.join(self.dicom_dir, self.dataframe.iloc[idx, 0])
        label = self.dataframe.iloc[idx, 1]
        dicom_image = dcmread(file_path)
        image = dicom_image.pixel_array.astype(np.float32)  # float for normalisation

        # Scale by the dtype's theoretical maximum into [0, 255].
        # NOTE(review): np.iinfo raises for float pixel data, and scaling
        # by the dtype max rather than the actual max can compress
        # low-exposure images into a narrow range -- confirm pixel dtype.
        max_val = np.iinfo(dicom_image.pixel_array.dtype).max
        image = (image / max_val) * 255.0

        # Quantise to uint8 so ToPILImage treats it as 8-bit grayscale.
        image = image.astype(np.uint8)

        # Apply preprocessing/augmentation if configured.
        if self.transform:
            image = self.transform(image)

        return image, label

# Preprocessing: augmentation + single-channel normalisation.  The
# 3-channel repeat is disabled because the model's first conv layer is
# rewritten below to accept 1-channel input.
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),  # DenseNet-121 expected input size
    transforms.RandomHorizontalFlip(),  # augmentation: random horizontal flip
    transforms.RandomRotation(10),      # augmentation: random rotation up to 10 degrees
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485], std=[0.229]),  # single-channel normalisation
    #transforms.Lambda(lambda x: x.repeat(3, 1, 1)),  # (disabled) grayscale -> 3 channels
])

# Load the dataset and split 80/20 into train/test.
dataset = DicomDataset(csv_file='/data2/gechaoyang/feiyan_by_zr/filenamewithlabel(number).csv', dicom_dir='/data2/gechaoyang/feiyan_by_zr/DCM_AP_files_last_version', transform=transform)
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# Batch loaders with 8 worker processes for parallel DICOM decoding.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True,num_workers=8)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False,num_workers=8)

# # 加载预训练的DenseNet-121模型
# model = models.densenet121(pretrained=False)
# num_ftrs = model.classifier.in_features
# model.classifier = torch.nn.Linear(num_ftrs, 2)  # 替换最后的全连接层

# Custom DenseNet-121: dropout-regularised 2-class head + 1-channel input.
class DenseNet121Custom(nn.Module):
    """DenseNet-121 (trained from scratch) adapted for grayscale input."""

    def __init__(self):
        super(DenseNet121Custom, self).__init__()
        self.densenet121 = models.densenet121(pretrained=False)
        num_ftrs = self.densenet121.classifier.in_features
        # Two-layer head; Dropout(0.5) against overfitting.
        self.densenet121.classifier = nn.Sequential(
            nn.Linear(num_ftrs, 256),
            nn.ReLU(),
            nn.Dropout(0.5),  # dropout layer against overfitting
            nn.Linear(256, 2)
        )
        # Replace the stem conv so the network accepts 1-channel (grayscale)
        # input; no pretrained weights are lost since pretrained=False.
        self.densenet121.features[0] = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)

    def forward(self, x):
        # Delegate entirely to the wrapped backbone.
        return self.densenet121(x)

# Instantiate the model
model = DenseNet121Custom()

# Loss and optimizer: this revision trains ALL parameters, with stronger
# weight decay as overfitting mitigation.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.01)
# LR scheduler: decay by 10x every 30 epochs.  NOTE(review):
# scheduler.step() is never called in the training loop, so this
# currently has no effect -- confirm whether it should be wired in.
scheduler = StepLR(optimizer, step_size=30, gamma=0.1)


# Resume from previously saved best weights when available.
if os.path.isfile('best_model_weights.pth'):
    print('Loading saved model weights...')
    # BUG FIX: map_location='cpu' lets a GPU-saved checkpoint load on a
    # CPU-only host; the model is moved to the target device below.
    # strict=False tolerates missing/unexpected keys, but a checkpoint
    # from the earlier 3-channel model would still fail on the conv1
    # size mismatch -- TODO confirm checkpoint compatibility.
    model.load_state_dict(torch.load('best_model_weights.pth', map_location='cpu'), strict=False)
else:
    print('No saved model weights found. Starting from scratch.')

# Select CUDA when available (restricted to physical GPU 3 via
# CUDA_VISIBLE_DEVICES at the top of the script) and move the model there.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Train, checkpointing on best TRAIN loss (this revision early-stops on
# training loss; validation loss is printed only for monitoring).
def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs=300):
    """Train ``model``; save weights whenever the epoch's mean training
    loss improves, and stop after 10 consecutive epochs without
    improvement.

    Args:
        model: network to optimise (modified in place; assumed on `device`).
        train_loader: training batches of (images, labels).
        test_loader: validation batches, passed to validate_model().
        criterion: loss function.
        optimizer: optimiser over the trainable parameters.
        num_epochs: maximum number of epochs.
    """
    best_train_loss = float('inf')
    patience = 0
    early_stopping_rounds = 10
    for epoch in range(num_epochs):
        # BUG FIX: re-enable train mode every epoch.  validate_model()
        # below leaves the model in eval mode, so the original's single
        # model.train() call left dropout disabled after epoch 1.
        model.train()
        running_loss = 0.0
        for images, labels in train_loader:
            # One dtype conversion + device move is enough (the original
            # moved labels to the device twice).
            images = images.to(device)
            labels = labels.long().to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        avg_train_loss = running_loss / len(train_loader)
        print(f'Epoch {epoch+1}/{num_epochs}, Train Loss: {avg_train_loss:.4f}')

        if avg_train_loss < best_train_loss:
            best_train_loss = avg_train_loss
            # BUG FIX: reset the stale-epoch counter on improvement; the
            # original never reset it, so ANY 10 non-improving epochs over
            # the whole run (not 10 consecutive) triggered early stopping.
            patience = 0
            torch.save(model.state_dict(), 'best_model_weights.pth')
            print('Best model weights saved.')
        else:
            patience += 1
            if patience >= early_stopping_rounds:
                print('Early stopping!')
                break
        # Monitoring only: the result is not used for stopping decisions.
        validate_model(model, test_loader, criterion)

# Validate: average per-batch loss on the validation loader.
def validate_model(model, test_loader, criterion):
    """Return the mean batch loss; leaves the model in eval mode."""
    model.eval()  # inference mode for a deterministic loss
    val_loss = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)  # uses module-level `device`
            outputs = model(images)
            loss = criterion(outputs, labels)
            val_loss += loss.item()
    avg_val_loss = val_loss / len(test_loader)
    print(f'Validation Loss: {avg_val_loss:.4f}')
    return avg_val_loss

# Test: top-1 accuracy over the held-out loader.
def test_model(model, test_loader):
    """Print the percentage of correctly classified samples."""
    model.eval()  # inference mode
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)  # uses module-level `device`
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)  # class index with the max logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    print(f'Accuracy: {accuracy:.2f}%')

# Train with early stopping, then report test accuracy.
train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs=300)
test_model(model, test_loader)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值