A Standardized Workflow for Building Neural Network Models with PyTorch, TensorFlow, and Jax

Before You Begin

The basic logic of training a neural network model in deep learning:

  1. trained_weight ⇐ (model, init_weight, training_data)
  2. predict_label = model(trained_weight, predicting_data)
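
In code, this two-phase logic looks roughly like the sketch below. The names are placeholders, not a real framework API: `fit` stands for whatever training loop your framework provides.

# A minimal, framework-agnostic sketch of the two phases above
# (`fit`, `model`, and all data variables are hypothetical placeholders).

# Phase 1: training turns the model, its initial weights,
# and the training data into trained weights.
trained_weight = fit(model, init_weight, training_data)

# Phase 2: inference applies the trained weights to new data.
predict_label = model(trained_weight, predicting_data)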

1. PyTorch

PyTorch Overview

  • PyTorch is an open-source Python library led by Facebook. It features GPU acceleration and powerful tensor computation, and builds neural network models with dynamic computation graphs on top of its Autograd system (see the short sketch after this list)
  • PyTorch code is highly customizable, flexible, and efficient
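
The "dynamic computation graph" part is easy to see with a few lines of Autograd: the graph is built on the fly as ordinary Python control flow executes, so different branches produce different graphs. A minimal sketch (the function and values are illustrative only):

import torch

def f(x):
    # Ordinary Python control flow; the graph is traced anew on each call.
    if x.sum() > 0:
        return (x ** 2).sum()
    return (x ** 3).sum()

x = torch.randn(3, requires_grad=True)
y = f(x)
y.backward()     # gradients follow whichever branch actually ran
print(x.grad)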

PyTorch Environment Setup

  • !conda install pytorch torchvision torchaudio
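
After installing, a quick sanity check (assuming a recent PyTorch build) confirms the version and shows which accelerator is visible; output varies by machine:

import torch

print(torch.__version__)
print(torch.cuda.is_available())          # NVIDIA GPU backend
print(torch.backends.mps.is_available())  # Apple Silicon backend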

1.1 PyTorch 1.x

import platform
import time
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor


"""定义数据"""
training_data = datasets.FashionMNIST(
    root="./datasets",
    train=True,
    download=True,
    transform=ToTensor(),
)

test_data = datasets.FashionMNIST(
    root="./datasets",
    train=False,
    download=True,
    transform=ToTensor(),
)

batch_size = 32

train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break
"""定义模型"""
if platform.system() == 'Darwin':
    device = "mps" if torch.backends.mps.is_available() else "cpu"
else:
    device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")


class NeuralNetwork(nn.Module):

    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.conv = torch.nn.Conv2d(1, 32, (3, 3), padding=1)
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(32 * 28 * 28, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )

    def forward(self, x):
        x = self.relu(self.conv(x))
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits


model = NeuralNetwork().to(device)
print(model)
"""优化模型"""
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        pred = model(X)
        loss = loss_fn(pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(
        f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n"
    )


if __name__ == "__main__":
    """执行训练"""
    epochs = 200
    for t in range(epochs):
        start_time = time.time_ns()
        print(f"Epoch {t+1}\n-------------------------------")
        train(train_dataloader, model, loss_fn, optimizer)
        test(test_dataloader, model, loss_fn)
        end_time = time.time_ns()
        print(f'Take time:{(end_time-start_time) / 1e9 : .4f}s')
        save_model = False
        if save_model:
            torch.save(model.state_dict(), "model.pth")
            print("Saved PyTorch Model State to model.pth")
    print("Done!")
    """验证模型"""
    verify_model = False
    if verify_model:
        model = NeuralNetwork()
        model.load_state_dict(torch.load("model.pth"))

        classes = [
            "T-shirt/top",
            "Trouser",
            "Pullover",
            "Dress",
            "Coat",
            "Sandal",
            "Shirt",
            "Sneaker",
            "Bag",
            "Ankle boot",
        ]

        model.eval()
        x, y = test_data[0][0], test_data[0][1]
        with torch.no_grad():
            pred = model(x.unsqueeze(0))  # add a batch dim: the conv stack expects (N, C, H, W)
            predicted, actual = classes[pred[0].argmax(0)], classes[y]
            print(f'Predicted: "{predicted}", Actual: "{actual}"')
    
    print("运行完成")

2. TensorFlow

TensorFlow Overview

  • TensorFlow is an end-to-end open-source machine learning platform led by Google
  • TensorFlow code is highly abstracted, with a mature ecosystem and a long history
  • TensorFlow 2.x builds models entirely through the Keras API, which simplifies the development workflow (see the short sketch after this list)
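
As a taste of that Keras API, the sketch below defines, compiles, and would train a small classifier in a few calls; the layer sizes are illustrative and this is not the model used in section 2.2:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(28, 28)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(train_images, train_labels, epochs=5)  # with data as in 2.2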

TensorFlow Environment Setup

  • !pip install tensorflow
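
As with PyTorch, it is worth confirming the version and any visible GPUs before training:

import tensorflow as tf

print(tf.__version__)
print(tf.config.list_physical_devices('GPU'))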

2.1 TensorFlow 1.x

import tensorflow as tf
import numpy as np

# Build and train a neural network that fits y = x^2 + 1
# Note: this is TF1 graph-mode code; under TensorFlow 2.x, use the
# tf.compat.v1 module and call tf.compat.v1.disable_eager_execution() first.
# Create the input data and the label data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
y_data = np.square(x_data) + 1 + np.random.normal(0, 0.05, x_data.shape)

# Define placeholders describing the shape/dtype of the network inputs
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])


# 定义模型层函数
def model_layer(inputs, in_size, out_size, activation_function=None):
    """
    :param inputs: 数据输入
    :param in_size: 输入大小
    :param out_size: 输出大小
    :param activation_function: 激活函数(默认没有)
    :return:output:数据输出
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    w_mul_x_add_b = tf.matmul(inputs, weights) + biases

    # Apply the activation function if one was given
    if activation_function is None:
        output = w_mul_x_add_b
    else:
        output = activation_function(w_mul_x_add_b)
    return output


# Define a hidden layer
hidden_layer1 = model_layer(xs, 1, 10, activation_function=tf.nn.relu)

# Define an output layer
output_layer1 = model_layer(hidden_layer1, 10, 1)

# Define global variable initialization (covers variables in the graph that are saved; tf.local_variables_initializer() covers graph variables that are not saved)
init_weight = tf.global_variables_initializer()

# Define the loss function
loss = tf.reduce_mean(
    tf.reduce_sum(tf.square(ys - output_layer1), reduction_indices=[1]))

# Define the training step
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# tf.Session() starts a TF backend session to execute the defined ops
# Run the global variable initializer
session = tf.Session()
session.run(init_weight)

# Run the training loop
for i in range(1000):
    session.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 100 == 0:
        print(session.run(loss, feed_dict={xs: x_data, ys: y_data}))

# Finally, close the session
session.close()

print("运行完成")

2.2 TensorFlow 2.x

# encoding:utf-8
import gzip
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
import matplotlib.pyplot as plt


# Build and train a neural network to classify images
# Stage 1: data preparation
# Load the dataset
def load_data():
    dirname = os.path.join('datasets', 'fashion-mnist')
    files = [
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
    ]

    paths = []
    for fname in files:
        paths.append(os.path.join(dirname, fname))

    with gzip.open(paths[0], 'rb') as lbpath:
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[1], 'rb') as imgpath:
        x_train = np.frombuffer(imgpath.read(), np.uint8,
                                offset=16).reshape(len(y_train), 28, 28)

    with gzip.open(paths[2], 'rb') as lbpath:
        y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(paths[3], 'rb') as imgpath:
        x_test = np.frombuffer(imgpath.read(), np.uint8,
                               offset=16).reshape(len(y_test), 28, 28)

    return (x_train, y_train), (x_test, y_test)


# Split the data into training and test sets and set up the label names
(train_images, train_labels), (test_images, test_labels) = load_data()
label_class = {
    0: 'T-shirt/top',
    1: 'Trouser',
    2: 'Pullover',
    3: 'Dress',
    4: 'Coat',
    5: 'Sandal',
    6: 'Shirt',
    7: 'Sneaker',
    8: 'Bag',
    9: 'Ankle boot'
}

# Normalize the data to 0~1 and reshape to [b, h, w, c]
train_images, test_images = train_images / 255.0, test_images / 255.0
train_images = train_images[..., tf.newaxis].astype("float32")
test_images = test_images[..., tf.newaxis].astype("float32")
train_ds = tf.data.Dataset.from_tensor_slices(
    (train_images, train_labels)).shuffle(buffer_size=60000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices(
    (test_images, test_labels)).batch(32)

for X, y in test_ds:
    print(f"Shape of X [N, H, W, C]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break

# Visualize the data
visual_data = False
if visual_data:
    plt.figure(figsize=(10, 10))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(train_images[i], cmap=plt.cm.binary)
        plt.xlabel(label_class[train_labels[i]])
    plt.show()


# Stage 2: build the model
# Define the model class
class MyModel(Model):

    def get_config(self):
        pass

    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10, activation='softmax')

    def call(self, inputs, training=None):
        x = self.conv1(inputs)
        x = self.flatten(x)
        x = self.d1(x)
        x = self.d2(x)
        output = x

        return output


# Instantiate the model
model = MyModel()

# Define the optimizer and loss function
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
loss_function = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False)

# Define metric containers for training loss and accuracy
train_loss_mean = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
    name='train_accuracy')

# Define metric containers for test loss and accuracy
test_loss_mean = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
    name='test_accuracy')


# Compute gradients and loss on the training set
@tf.function
def train_step(image, label):
    with tf.GradientTape() as tape:
        predictions = model(image, training=True)
        train_loss = loss_function(label, predictions)
    gradients = tape.gradient(train_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss_mean(train_loss)
    train_accuracy(label, predictions)


# Compute the loss on the test set
@tf.function
def test_step(image, label):
    predictions = model(image, training=False)
    tt_ls = loss_function(label, predictions)

    test_loss_mean(tt_ls)
    test_accuracy(label, predictions)


if __name__ == "__main__":
    # Stage 3: the training/evaluation loop
    num_epochs = 200
    with tf.device('/GPU:0'):
        for epoch in range(num_epochs):
            start_time = time.time_ns()
            train_loss_mean.reset_states()
            train_accuracy.reset_states()
            test_loss_mean.reset_states()
            test_accuracy.reset_states()

            for images, labels in train_ds:
                train_step(images, labels)

            for test_images, test_labels in test_ds:
                test_step(test_images, test_labels)

            end_time = time.time_ns()
            print(f'Epoch {epoch + 1}, '
                  f'Loss:{train_loss_mean.result() : .8f}, '
                  f'Accuracy:{train_accuracy.result() * 100 : .2f}%, '
                  f'Test Loss:{test_loss_mean.result() : .8f}, '
                  f'Test Accuracy:{test_accuracy.result() * 100 : .2f}%, '
                  f'Take time:{(end_time-start_time) / 1e9 : .4f}s')
                  
    print("运行完成")

3. Jax

Jax is a high-performance open-source computing platform led by Google that combines the automatic differentiation mechanism Autograd with the accelerated linear algebra library XLA

  • It targets high-performance machine learning research. JAX can automatically differentiate native Python and NumPy functions; it can differentiate through loops, branches, recursion, and closures, and can even take derivatives of derivatives of derivatives (see the sketch after this list)
  • The neural network library Flax offers flexible construction of deep learning models; reverse-mode differentiation (backpropagation) and forward-mode differentiation can be composed in any order, and gradients are optimized and processed through the optimizer library Optax
  • Jax delivers strong performance and has a vibrant community
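
A minimal sketch of that autodiff claim: differentiate an ordinary Python function (branch included) and stack jax.grad for higher-order derivatives. The function is illustrative only:

import jax
import jax.numpy as jnp

def f(x):
    # Plain Python control flow is fine under jax.grad (no jit here).
    return jnp.sin(x) if x > 0 else x ** 3

df = jax.grad(f)              # first derivative
d3f = jax.grad(jax.grad(df))  # a derivative of a derivative of a derivative
print(df(1.0), d3f(1.0))      # cos(1.0) and -cos(1.0)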

Jax Environment Setup

  • !pip install jax jaxlib flax optax
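
A quick check of the installed version and the devices Jax can see (a CPU-only install lists a CPU device):

import jax

print(jax.__version__)
print(jax.devices())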

3.1 Jax 0.x

from typing import Any, Callable, Sequence, Tuple
import numpy as np
import jax
import jax.numpy as jnp
from flax.training import train_state
from flax import linen as nn
import optax
from tqdm.auto import tqdm
from functools import partial


# Define the model
class ResNetBlock(nn.Module):
    """ResNet block."""
    ModuleDef = Any
    filters: int
    conv: ModuleDef
    norm: ModuleDef
    act: Callable
    strides: Tuple[int, int] = (1, 1)

    @nn.compact
    def __call__(
        self,
        x,
    ):
        residual = x
        y = self.conv(self.filters, (3, 3), self.strides)(x)
        y = self.norm()(y)
        y = self.act(y)
        y = self.conv(self.filters, (3, 3))(y)
        y = self.norm(scale_init=nn.initializers.zeros)(y)

        if residual.shape != y.shape:
            residual = self.conv(self.filters, (1, 1),
                                 self.strides,
                                 name='conv_proj')(residual)
            residual = self.norm(name='norm_proj')(residual)

        return self.act(residual + y)


class BottleneckResNetBlock(nn.Module):
    """Bottleneck ResNet block."""
    ModuleDef = Any
    filters: int
    conv: ModuleDef
    norm: ModuleDef
    act: Callable
    strides: Tuple[int, int] = (1, 1)

    @nn.compact
    def __call__(self, x):
        residual = x
        y = self.conv(self.filters, (1, 1))(x)
        y = self.norm()(y)
        y = self.act(y)
        y = self.conv(self.filters, (3, 3), self.strides)(y)
        y = self.norm()(y)
        y = self.act(y)
        y = self.conv(self.filters * 4, (1, 1))(y)
        y = self.norm(scale_init=nn.initializers.zeros)(y)

        if residual.shape != y.shape:
            residual = self.conv(self.filters * 4, (1, 1),
                                 self.strides,
                                 name='conv_proj')(residual)
            residual = self.norm(name='norm_proj')(residual)

        return self.act(residual + y)


class ResNet(nn.Module):
    """ResNetV1."""
    ModuleDef = Any
    stage_sizes: Sequence[int]
    block_cls: ModuleDef
    num_classes: int
    num_filters: int = 64
    dtype: Any = jnp.float32
    act: Callable = nn.relu
    conv: ModuleDef = nn.Conv

    @nn.compact
    def __call__(self, x, train: bool = True):
        conv = partial(self.conv, use_bias=False, dtype=self.dtype)
        norm = partial(nn.BatchNorm,
                       use_running_average=not train,
                       momentum=0.9,
                       epsilon=1e-5,
                       dtype=self.dtype)

        x = conv(self.num_filters, (7, 7), (2, 2),
                 padding=[(3, 3), (3, 3)],
                 name='conv_init')(x)
        x = norm(name='bn_init')(x)
        x = nn.relu(x)
        x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
        for i, block_size in enumerate(self.stage_sizes):
            for j in range(block_size):
                strides = (2, 2) if i > 0 and j == 0 else (1, 1)
                x = self.block_cls(self.num_filters * 2**i,
                                   strides=strides,
                                   conv=conv,
                                   norm=norm,
                                   act=self.act)(x)
        x = jnp.mean(x, axis=(1, 2))
        x = nn.Dense(self.num_classes, dtype=self.dtype)(x)
        x = jnp.asarray(x, self.dtype)
        return x


ResNet18_c10 = partial(ResNet,
                       stage_sizes=[2, 2, 2, 2],
                       block_cls=ResNetBlock,
                       num_classes=10)


# Define the data pipeline
def data_flow(*, dataset, batch_size=1, prng=None):
    total_data = len(dataset)
    if prng is not None:
        index_order = np.array(range(total_data))
        index_shuffle = jax.random.permutation(prng,
                                               index_order,
                                               independent=True)
    else:
        index_order = np.array(range(total_data))
        index_shuffle = index_order

    total_batch = total_data // batch_size
    for idx in range(total_batch):
        batch_index = index_shuffle[idx * batch_size:(idx + 1) * batch_size]
        mini_batch = [dataset[k] for k in batch_index]
        images = np.expand_dims(np.stack([x['image'] for x in mini_batch]),
                                -1).astype('float') / 255
        labels = np.stack([x['label'] for x in mini_batch])
        yield {'image': images, 'label': labels}


dataset_mnist = np.load("datasets/mnist.npy", allow_pickle=True).item()


# Define the loss function
def cross_entropy_loss(*, logits, labels):
    labels_onehot = jax.nn.one_hot(labels, num_classes=10)
    return optax.softmax_cross_entropy(logits=logits,
                                       labels=labels_onehot).mean()


# Define the evaluation metrics
def compute_metrics(*, logits, labels):
    loss = cross_entropy_loss(logits=logits, labels=labels)
    accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
    metrics = {
        'loss': loss,
        'accuracy': accuracy,
    }
    return metrics


# Initialize the model into a training state
def create_train_state(*, prng, learning_rate, momentum):
    net = ResNet18_c10()
    params = net.init(prng, jnp.ones([1, 28, 28, 1]))['params']
    tx = optax.sgd(learning_rate, momentum)
    return train_state.TrainState.create(apply_fn=net.apply,
                                         params=params,
                                         tx=tx)


# Define the training procedure
# Define the per-step training operation
@jax.jit
def train_step(state, batch_data):
    """
    state: 不仅包含参数信息还包含优化器的信息等
    batch_data: 批数据 (N, H, W, C)
    """

    def loss_fn(params):
        logits, _ = ResNet18_c10().apply({'params': params},
                                         batch_data['image'],
                                         mutable=['batch_stats'])
        loss = cross_entropy_loss(logits=logits, labels=batch_data['label'])
        return loss, logits

    grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
    (_, logits), grads = grad_fn(state.params)
    state = state.apply_gradients(grads=grads)
    metrics = compute_metrics(logits=logits, labels=batch_data['label'])
    return state, metrics


# Define the training loop logic
def train_model(state, epoch, batch_size, prng):
    batch_metrics = []
    train_dataset = dataset_mnist['train']
    total_batch = len(train_dataset) // batch_size

    with tqdm(data_flow(dataset=train_dataset,
                        batch_size=batch_size,
                        prng=prng),
              total=total_batch) as run_bar_set:
        for batch in run_bar_set:
            state, metrics = train_step(state, batch)
            batch_metrics.append(metrics)
            batch_metrics_jnp = jax.device_get(batch_metrics)
            epoch_metrics = {
                k: np.mean([metrics[k] for metrics in batch_metrics_jnp])
                for k in metrics.keys()
            }
            run_bar_set.set_description(
                f"train epoch: {epoch+1}, "
                f"loss: {epoch_metrics['loss']:.4f}, "
                f"accuracy: {(epoch_metrics['accuracy'] * 100):.2f}")

    return state


# Define the test procedure
# Define the per-step test operation
@jax.jit
def test_step(params, batch_data):
    """
    params: 经过训练的参数
    batch_data: 批数据 (N, H, W, C)
    """
    logits, _ = ResNet18_c10().apply({'params': params},
                                     batch_data['image'],
                                     mutable=['batch_stats'])
    return compute_metrics(logits=logits, labels=batch_data['label'])


# Define the test loop logic
def test_model(params, epoch, batch_size):
    batch_metrics = []
    test_dataset = dataset_mnist['test']
    total_batch = len(test_dataset) // batch_size

    with tqdm(data_flow(dataset=test_dataset, batch_size=batch_size),
              total=total_batch) as run_bar_set:
        for batch in run_bar_set:
            metrics = test_step(params, batch)
            batch_metrics.append(metrics)
            batch_metrics_jnp = jax.device_get(batch_metrics)
            epoch_metrics = {
                k: np.mean([metrics[k] for metrics in batch_metrics_jnp])
                for k in metrics.keys()
            }
            run_bar_set.set_description(
                f"train epoch: {epoch+1}, "
                f"loss: {epoch_metrics['loss']:.4f}, "
                f"accuracy: {(epoch_metrics['accuracy'] * 100):.2f}")

    return epoch_metrics


# Run training and testing
seed = 51
prng = jax.random.PRNGKey(seed)  # derive a PRNG key from the seed
# Split the original key into 2 new keys; the same key fed to the same random
# function always produces the same output, giving reproducible weight init.
prng, init_prng = jax.random.split(prng, 2)
num_epochs = 10
batch_size = 32
learning_rate = 0.1
momentum = 0.9
state = create_train_state(prng=init_prng,
                           learning_rate=learning_rate,
                           momentum=momentum)

for epoch in range(num_epochs):
    # PRNG key used to shuffle the data order for this epoch
    prng, data_prng = jax.random.split(prng)
    for train_batch_data in data_flow(dataset=dataset_mnist['train'],
                                      batch_size=batch_size,
                                      prng=data_prng):
        print(train_batch_data['image'].shape, train_batch_data['image'].dtype)
        print(train_batch_data['label'].shape, train_batch_data['label'].dtype)
        break
    for test_batch_data in data_flow(dataset=dataset_mnist['test'],
                                     batch_size=batch_size):
        print(test_batch_data['image'].shape, test_batch_data['image'].dtype)
        print(test_batch_data['label'].shape, test_batch_data['label'].dtype)
        break
    # Train the model
    state = train_model(state, epoch, batch_size, data_prng)
    # Test the model
    params = state.params
    metrics = test_model(params, epoch, batch_size)

print("运行完成")