Breast Cancer Recognition with DenseNet

Preface

Having built a basic understanding of the DenseNet architecture in the previous post, this week I use the network to implement breast cancer recognition.
![](https://img-blog.csdnimg.cn/direct/5c272931b55f4d96a637c3d0f64e5a5f.png)

DenseNet Model

The model structure is as follows:
(model architecture diagram)

import re
from typing import Any, List, Tuple
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor


class _DenseLayer(nn.Module):
    def __init__(self,
                 input_c: int,
                 growth_rate: int,
                 bn_size: int,
                 drop_rate: float,
                 memory_efficient: bool = False):
        super(_DenseLayer, self).__init__()

        self.add_module("norm1", nn.BatchNorm2d(input_c))
        self.add_module("relu1", nn.ReLU(inplace=True))
        self.add_module("conv1", nn.Conv2d(in_channels=input_c,
                                           out_channels=bn_size * growth_rate,
                                           kernel_size=1,
                                           stride=1,
                                           bias=False))
        self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module("relu2", nn.ReLU(inplace=True))
        self.add_module("conv2", nn.Conv2d(bn_size * growth_rate,
                                           growth_rate,
                                           kernel_size=3,
                                           stride=1,
                                           padding=1,
                                           bias=False))
        self.drop_rate = drop_rate
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        concat_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concat_features)))
        return bottleneck_output

    @staticmethod
    def any_requires_grad(inputs: List[Tensor]) -> bool:
        for tensor in inputs:
            if tensor.requires_grad:
                return True

        return False

    @torch.jit.unused
    def call_checkpoint_bottleneck(self, inputs: List[Tensor]) -> Tensor:
        def closure(*inp):
            return self.bn_function(inp)

        return cp.checkpoint(closure, *inputs)

    def forward(self, inputs: Tensor) -> Tensor:
        if isinstance(inputs, Tensor):
            prev_features = [inputs]
        else:
            prev_features = inputs

        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                raise Exception("memory efficient not supported in JIT")

            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)

        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features,
                                     p=self.drop_rate,
                                     training=self.training)

        return new_features


class _DenseBlock(nn.ModuleDict):
    _version = 2

    def __init__(self,
                 num_layers: int,
                 input_c: int,
                 bn_size: int,
                 growth_rate: int,
                 drop_rate: float,
                 memory_efficient: bool = False):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(input_c + i * growth_rate,
                                growth_rate=growth_rate,
                                bn_size=bn_size,
                                drop_rate=drop_rate,
                                memory_efficient=memory_efficient)
            self.add_module("denselayer%d" % (i + 1), layer)

    def forward(self, init_features: Tensor) -> Tensor:
        features = [init_features]
        for name, layer in self.items():
            new_features = layer(features)
            features.append(new_features)
        return torch.cat(features, 1)


class _Transition(nn.Sequential):
    def __init__(self,
                 input_c: int,
                 output_c: int):
        super(_Transition, self).__init__()
        self.add_module("norm", nn.BatchNorm2d(input_c))
        self.add_module("relu", nn.ReLU(inplace=True))
        self.add_module("conv", nn.Conv2d(input_c,
                                          output_c,
                                          kernel_size=1,
                                          stride=1,
                                          bias=False))
        self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))


class DenseNet(nn.Module):
    """
    Densenet-BC model class for imagenet

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient
    """

    def __init__(self,
                 growth_rate: int = 32,
                 block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
                 num_init_features: int = 64,
                 bn_size: int = 4,
                 drop_rate: float = 0,
                 num_classes: int = 1000,
                 memory_efficient: bool = False):
        super(DenseNet, self).__init__()

        # first conv+bn+relu+pool
        self.features = nn.Sequential(OrderedDict([
            ("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ("norm0", nn.BatchNorm2d(num_init_features)),
            ("relu0", nn.ReLU(inplace=True)),
            ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # each dense block
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers,
                                input_c=num_features,
                                bn_size=bn_size,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate,
                                memory_efficient=memory_efficient)
            self.features.add_module("denseblock%d" % (i + 1), block)
            num_features = num_features + num_layers * growth_rate

            if i != len(block_config) - 1:
                trans = _Transition(input_c=num_features,
                                    output_c=num_features // 2)
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2

        # final batch norm
        self.features.add_module("norm5", nn.BatchNorm2d(num_features))

        # fc layer
        self.classifier = nn.Linear(num_features, num_classes)

        # init weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out


def densenet121(**kwargs: Any) -> DenseNet:
    # Top-1 error: 25.35%
    # 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth'
    return DenseNet(growth_rate=32,
                    block_config=(6, 12, 24, 16),
                    num_init_features=64,
                    **kwargs)


def densenet169(**kwargs: Any) -> DenseNet:
    # Top-1 error: 24.00%
    # 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth'
    return DenseNet(growth_rate=32,
                    block_config=(6, 12, 32, 32),
                    num_init_features=64,
                    **kwargs)


def densenet201(**kwargs: Any) -> DenseNet:
    # Top-1 error: 22.80%
    # 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth'
    return DenseNet(growth_rate=32,
                    block_config=(6, 12, 48, 32),
                    num_init_features=64,
                    **kwargs)


def densenet161(**kwargs: Any) -> DenseNet:
    # Top-1 error: 22.35%
    # 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth'
    return DenseNet(growth_rate=48,
                    block_config=(6, 12, 36, 24),
                    num_init_features=96,
                    **kwargs)


def load_state_dict(model: nn.Module, weights_path: str) -> None:
    # '.'s are no longer allowed in module names, but previous _DenseLayer
    # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
    # They are also in the checkpoints in model_urls. This pattern is used
    # to find such keys.
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')

    state_dict = torch.load(weights_path)

    num_classes = model.classifier.out_features
    load_fc = num_classes == 1000

    for key in list(state_dict.keys()):
        if load_fc is False:
            if "classifier" in key:
                del state_dict[key]

        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    model.load_state_dict(state_dict, strict=load_fc)
    print("successfully load pretrain-weights.")

The training results are as follows:

(training results screenshot)
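
The training script itself is not reproduced in this post. As a rough sketch of what such a loop can look like (the `ImageFolder` directory layout, transforms, batch size, and optimizer settings below are assumptions, not the author's exact configuration):

```
# Hedged training-loop sketch; dataset path and hyperparameters are illustrative.
import torch
from torch import nn, optim
from torchvision import datasets, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

train_tf = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
train_set = datasets.ImageFolder("./data/train", transform=train_tf)  # hypothetical path
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)

model = densenet121(num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)

for epoch in range(10):
    model.train()
    running_loss, correct = 0.0, 0
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        logits = model(images)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * images.size(0)
        correct += (logits.argmax(1) == labels).sum().item()
    print(f"epoch {epoch + 1}: loss={running_loss / len(train_set):.4f} "
          f"acc={correct / len(train_set):.4f}")
```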
TensorBoard is used to observe how the accuracy and learning rate change during training; a minimal logging sketch follows the screenshots below.
(TensorBoard screenshots: accuracy and learning-rate curves)
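
For reference, a minimal way to log the training accuracy and the current learning rate to TensorBoard with `torch.utils.tensorboard` could look like the sketch below; the log directory, tag names, and StepLR schedule are illustrative assumptions.

```
# Hedged TensorBoard logging sketch; view with: tensorboard --logdir ./runs
import torch
from torch.utils.tensorboard import SummaryWriter

model = densenet121(num_classes=2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
writer = SummaryWriter(log_dir="./runs/densenet_bc")  # hypothetical log dir

for epoch in range(10):
    # In a real run the training steps for this epoch happen here,
    # producing the epoch accuracy that gets logged below.
    epoch_acc = 0.0  # placeholder value for the sketch
    writer.add_scalar("accuracy/train", epoch_acc, epoch)
    writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], epoch)
    scheduler.step()

writer.close()
```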

Summary

This week I continued to consolidate the earlier material and gained a deeper understanding of how the DenseNet network is implemented and applied. For now, though, I have only learned to roughly reuse the existing model; modifying it on my own is still difficult. Next I will try to modify the model myself and study it in more depth.

Below is a code example for breast cancer pathology image classification based on DenseNet:

```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import cv2
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import RMSprop, Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.applications.densenet import DenseNet121


# Load the dataset
def load_data():
    data_dir = '../input/breast-histopathology-images/IDC_regular_ps50_idx5/'
    patients = os.listdir(data_dir)
    labels = pd.read_csv('../input/breast-histopathology-images/IDC_regular_ps50_idx5/label.csv')
    labels = pd.DataFrame({'path': labels['path'].apply(lambda x: x[4:]), 'label': labels['label']})
    labels['id'] = labels['path'].apply(lambda x: x.split('/')[0])
    labels['id'] = labels['id'].astype(int)
    labels = labels.set_index('id')
    labels = labels.loc[patients]
    labels = labels.reset_index(drop=True)
    labels = labels.drop(columns=['path'])
    images = []
    for patient in patients:
        for file in os.listdir(data_dir + patient):
            image = cv2.imread(data_dir + patient + '/' + file)
            image = cv2.resize(image, (50, 50))
            images.append(image)
    images = np.array(images)
    images, labels = shuffle(images, labels, random_state=42)
    return images, labels


# Preprocess the data
def preprocess_data(images, labels):
    images = images / 255.0
    labels = to_categorical(labels, num_classes=2)
    x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42)
    return x_train, x_test, y_train, y_test


# Build the model
def build_model():
    densenet = DenseNet121(include_top=False, input_shape=(50, 50, 3))
    model = Sequential()
    model.add(densenet)
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model


# Train the model
def train_model(x_train, x_test, y_train, y_test):
    datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.1,
                                 width_shift_range=0.1, height_shift_range=0.1)
    datagen.fit(x_train)
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', patience=3, min_lr=0.00001, verbose=1)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                                  epochs=50,
                                  validation_data=(x_test, y_test),
                                  verbose=1,
                                  steps_per_epoch=x_train.shape[0] // 32,
                                  callbacks=[reduce_lr, early_stop])
    return history


# Visualize the training process
def plot_history(history):
    sns.set()
    fig, ax = plt.subplots(1, 2, figsize=(20, 5))
    ax[0].plot(history.history['loss'], label='Train Loss')
    ax[0].plot(history.history['val_loss'], label='Validation Loss')
    ax[0].set_xlabel('Epochs')
    ax[0].set_ylabel('Loss')
    ax[0].set_title('Loss')
    ax[0].legend(loc='best')
    ax[1].plot(history.history['accuracy'], label='Train Accuracy')
    ax[1].plot(history.history['val_accuracy'], label='Validation Accuracy')
    ax[1].set_xlabel('Epochs')
    ax[1].set_ylabel('Accuracy')
    ax[1].set_title('Accuracy')
    ax[1].legend(loc='best')
    plt.show()


# Evaluate the model
def test_model(x_test, y_test):
    y_pred = model.predict(x_test)
    y_pred = np.argmax(y_pred, axis=1)
    y_test = np.argmax(y_test, axis=1)
    accuracy = accuracy_score(y_test, y_pred)
    cm = confusion_matrix(y_test, y_pred)
    print('Test Accuracy:', accuracy)
    print('Confusion Matrix:')
    print(cm)


# Run the pipeline
images, labels = load_data()
x_train, x_test, y_train, y_test = preprocess_data(images, labels)
model = build_model()
history = train_model(x_train, x_test, y_train, y_test)
plot_history(history)
test_model(x_test, y_test)
```

The code above uses DenseNet121 as the backbone convolutional network and trains a breast cancer pathology image classifier through data augmentation and transfer learning.
