PennyLane Hybrid Classical-Quantum Classifier: MNIST Classification

Environment

  • pytorch=1.13.1
  • pennylane=0.23.0
# Install PennyLane (this post was written against version 0.23.0)
pip install pennylane --upgrade

# Visit https://pytorch.org/get-started/locally/ for instructions on installing PyTorch.
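To confirm the environment matches before running anything, here is a quick version check (a minimal sketch, assuming both packages are installed):

import pennylane as qml
import torch

# This post was written against pennylane 0.23.0 and pytorch 1.13.1
print("pennylane:", qml.version())
print("torch:", torch.__version__)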

Directory structure

.
├── models
│   └── qmodel.py
└── run.py

Create a folder named models, and inside it create qmodel.py with the following contents:

qmodel.py

import torch
import torch.nn as nn
import pennylane as qml

n_qubits = 4                # Number of qubits
dev = qml.device("default.qubit", wires=n_qubits)
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
q_depth = 1                 # Depth of the quantum circuit (number of variational layers)
q_delta = 0.01              # Initial spread of the random quantum weights

# Define the quantum network
def H_layer(nqubits):
    """Layer of single-qubit Hadamard gates.
    """
    for idx in range(nqubits):
        qml.Hadamard(wires=idx)


def RY_layer(w):
    """Layer of parametrized qubit rotations around the y axis.
    """
    for idx, element in enumerate(w):
        qml.RY(element, wires=idx)


def entangling_layer(nqubits):
    """Layer of CNOTs followed by another shifted layer of CNOT.
    """
    # In other words it should apply something like :
    # CNOT  CNOT  CNOT  CNOT...  CNOT
    #   CNOT  CNOT  CNOT...  CNOT
    for i in range(0, nqubits - 1, 2):  # Loop over even indices: i=0,2,...N-2
        qml.CNOT(wires=[i, i + 1])
    for i in range(1, nqubits - 1, 2):  # Loop over odd indices:  i=1,3,...N-3
        qml.CNOT(wires=[i, i + 1])

@qml.qnode(dev, interface="torch")
def quantum_net(q_input_features, q_weights_flat):
    """
    The variational quantum circuit.
    """

    # Reshape weights
    q_weights = q_weights_flat.reshape(q_depth, n_qubits)

    # Start from state |+> , unbiased w.r.t. |0> and |1>
    H_layer(n_qubits)

    # Embed features in the quantum node
    RY_layer(q_input_features)

    # Sequence of trainable variational layers
    for k in range(q_depth):
        entangling_layer(n_qubits)
        RY_layer(q_weights[k])

    # Expectation values in the Z basis
    exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(n_qubits)]
    return exp_vals

class DressedQuantumNet(nn.Module):
    """
    Torch module implementing the *dressed* quantum net.
    """

    def __init__(self):
        """
        Definition of the *dressed* layout.
        """

        super().__init__()
        self.flat_net = nn.Flatten()
        self.pre_net = nn.Linear(784, n_qubits)
        self.q_params = nn.Parameter(q_delta * torch.randn(q_depth * n_qubits))
        self.post_net = nn.Linear(n_qubits, 10)

    def forward(self, input_features):
        """
        Defining how tensors are supposed to move through the *dressed* quantum
        net.
        """
        # obtain the input features for the quantum circuit
        # by reducing the feature dimension from 784 to 4
        input_features = self.flat_net(input_features)
        pre_out = self.pre_net(input_features)

        # Squash the features into (-π/2, π/2) so they can serve as RY rotation angles
        pre_out = torch.arctan(pre_out)

        # Apply the quantum circuit to each sample in the batch.
        # With interface="torch" the QNode already returns a torch tensor;
        # re-wrapping it in torch.Tensor() would detach it from the autograd
        # graph and stop q_params from being trained.
        q_out = torch.empty(0, n_qubits, device=device)

        for elem in pre_out:
            q_out_elem = quantum_net(elem, self.q_params).float().unsqueeze(0)
            q_out = torch.cat((q_out, q_out_elem))
        return self.post_net(q_out)
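Before wiring the model into training, it helps to sanity-check qmodel.py on its own. The snippet below is a hypothetical usage example (not part of the original post): it draws the variational circuit with qml.draw and pushes one dummy MNIST-shaped batch through the dressed network.

import torch
import pennylane as qml
from models.qmodel import DressedQuantumNet, quantum_net, n_qubits, q_depth

# Draw the circuit for zero features and zero weights
features = torch.zeros(n_qubits)
weights = torch.zeros(q_depth * n_qubits)
print(qml.draw(quantum_net)(features, weights))

# One dummy batch through the dressed net: (batch, 1, 28, 28) -> (batch, 10)
net = DressedQuantumNet()
x = torch.randn(2, 1, 28, 28)
print(net(x).shape)  # expected: torch.Size([2, 10])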

run.py

import time
import os

# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader


# Pennylane
import pennylane as qml
from pennylane import numpy as np

torch.manual_seed(42)
np.random.seed(42)


os.environ["OMP_NUM_THREADS"] = "1"

n_qubits = 4                # Number of qubits
step = 0.003                # Learning rate
batch_size = 32             # Number of samples for each training step
num_epochs = 50             # Number of training epochs
q_depth = 3                 # Depth of the quantum circuit (note: the imported
                            # DressedQuantumNet uses its own q_depth = 1 from qmodel.py)
gamma_lr_scheduler = 0.1    # Learning-rate reduction factor (no scheduler is
                            # actually attached below, so this value is unused)
q_delta = 0.01              # Initial spread of random quantum weights
start_time = time.time()    # Start of the computation timer

dev = qml.device("default.qubit", wires=n_qubits)
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")

# Load the MNIST dataset
# Training set
train_dataset = torchvision.datasets.MNIST(
    root = 'dataset/',
    train = True,
    transform = transforms.ToTensor(),
    download=True
)
# Test set
test_dataset = torchvision.datasets.MNIST(
    root = 'dataset/',
    train = False,
    transform = transforms.ToTensor(),
    download=True
)

train_data_size = len(train_dataset)
valid_data_size = len(test_dataset)

train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set

from models.qmodel import DressedQuantumNet

model = DressedQuantumNet().to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=step)

def train_model(model, criterion, optimizer, num_epochs):

    print("Training started:")

    best_acc = 0

    for epoch in range(num_epochs):
        epoch_start = time.time()

        print("Epoch: {}/{}".format(epoch + 1, num_epochs))
        model.train()
        train_loss = 0.0
        train_acc = 0.0
        valid_loss = 0.0
        valid_acc = 0.0

        for i,(inputs, labels) in enumerate(train_dataloader):

            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)

            loss = criterion(outputs, labels)

            loss.backward()

            optimizer.step()
            train_loss += loss.item() * inputs.size(0)

            _, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))

            acc = torch.mean(correct_counts.type(torch.FloatTensor))

            train_acc += acc.item() * inputs.size(0)

        with torch.no_grad():
            model.eval()

            y_true = []  # labels and predictions are collected here for
            y_pred = []  # post-hoc analysis (see the sketch after the training log)
            for j, (inputs, labels) in enumerate(test_dataloader):
                inputs = inputs.to(device)
                labels = labels.to(device)

                outputs = model(inputs)

                loss = criterion(outputs, labels)

                valid_loss += loss.item() * inputs.size(0)

                _, predictions = torch.max(outputs.data, 1)
                correct_counts = predictions.eq(labels.data.view_as(predictions))

                acc = torch.mean(correct_counts.type(torch.FloatTensor))

                valid_acc += acc.item() * inputs.size(0)

                y_true.extend(labels.cpu().numpy().tolist())
                y_pred.extend(predictions.cpu().numpy().tolist())

        avg_train_loss = train_loss / train_data_size
        avg_train_acc = train_acc / train_data_size

        avg_valid_loss = valid_loss / valid_data_size
        avg_valid_acc = valid_acc / valid_data_size

        if best_acc < avg_valid_acc:
            best_acc = avg_valid_acc
            best_epoch = epoch + 1

        epoch_end = time.time()

        print(
            "\t Training: Loss: {:.4f}, Accuracy: {:.4f}%, "
            "\n\t Validation: Loss: {:.4f}, Accuracy: {:.4f}%, Time: {:.3f}s".format(
                avg_train_loss, avg_train_acc * 100, avg_valid_loss, avg_valid_acc * 100,
                                epoch_end - epoch_start
            ))
        print("\t Best Accuracy for validation : {:.4f} at epoch {:03d}".format(best_acc, best_epoch))

train_model(model, criterion, optimizer, num_epochs)
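On CPU each epoch takes roughly ten minutes (see the log below), because every sample is fed through the quantum circuit one at a time. For quicker hyperparameter experiments, one option (an addition, not part of the original script) is to train on a subset of MNIST first:

# Optional: shrink the training set for faster experiments
from torch.utils.data import Subset

small_train = Subset(train_dataset, range(2048))  # first 2048 samples
train_dataloader = DataLoader(dataset=small_train, batch_size=batch_size, shuffle=True)
train_data_size = len(small_train)  # keep the averaged metrics consistent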

Training results. Compared with a purely classical network (94.49%), the hybrid model still falls a bit short:

Training started:
Epoch: 1/50
	 Training: Loss: 1.1719, Accuracy: 55.0467%, 
	 Validation: Loss: 0.9824, Accuracy: 62.8900%, Time: 612.915s
	 Best Accuracy for validation : 0.6289 at epoch 001
Epoch: 2/50
	 Training: Loss: 0.8633, Accuracy: 70.1850%, 
	 Validation: Loss: 0.7958, Accuracy: 73.6900%, Time: 609.395s
	 Best Accuracy for validation : 0.7369 at epoch 002
Epoch: 3/50
	 Training: Loss: 0.7467, Accuracy: 76.1867%, 
	 Validation: Loss: 0.7379, Accuracy: 77.0200%, Time: 603.005s
	 Best Accuracy for validation : 0.7702 at epoch 003
Epoch: 4/50
	 Training: Loss: 0.6965, Accuracy: 78.6633%, 
	 Validation: Loss: 0.6963, Accuracy: 78.9100%, Time: 607.274s
	 Best Accuracy for validation : 0.7891 at epoch 004
Epoch: 5/50
	 Training: Loss: 0.6713, Accuracy: 79.6633%, 
	 Validation: Loss: 0.7107, Accuracy: 78.0700%, Time: 615.727s
	 Best Accuracy for validation : 0.7891 at epoch 004
Epoch: 6/50
	 Training: Loss: 0.6517, Accuracy: 80.3983%, 
	 Validation: Loss: 0.6842, Accuracy: 79.3700%, Time: 613.060s
	 Best Accuracy for validation : 0.7937 at epoch 006
Epoch: 7/50
	 Training: Loss: 0.6397, Accuracy: 80.5467%, 
	 Validation: Loss: 0.6647, Accuracy: 80.3300%, Time: 609.802s
	 Best Accuracy for validation : 0.8033 at epoch 007
Epoch: 8/50
	 Training: Loss: 0.6246, Accuracy: 81.0650%, 
	 Validation: Loss: 0.6638, Accuracy: 80.9200%, Time: 619.219s
	 Best Accuracy for validation : 0.8092 at epoch 008
Epoch: 9/50
	 Training: Loss: 0.6183, Accuracy: 81.3633%, 
	 Validation: Loss: 0.6591, Accuracy: 80.8900%, Time: 613.014s
	 Best Accuracy for validation : 0.8092 at epoch 008
Epoch: 10/50
	 Training: Loss: 0.6067, Accuracy: 81.9067%, 
	 Validation: Loss: 0.6495, Accuracy: 81.0200%, Time: 609.195s
	 Best Accuracy for validation : 0.8102 at epoch 010
Epoch: 11/50
	 Training: Loss: 0.6004, Accuracy: 82.1517%, 
	 Validation: Loss: 0.6396, Accuracy: 81.1100%, Time: 609.755s
	 Best Accuracy for validation : 0.8111 at epoch 011
Epoch: 12/50
	 Training: Loss: 0.5946, Accuracy: 82.2650%, 
	 Validation: Loss: 0.6305, Accuracy: 82.2600%, Time: 621.299s
	 Best Accuracy for validation : 0.8226 at epoch 012
Epoch: 13/50
	 Training: Loss: 0.5880, Accuracy: 82.7517%, 
	 Validation: Loss: 0.6329, Accuracy: 81.1800%, Time: 609.291s
	 Best Accuracy for validation : 0.8226 at epoch 012
Epoch: 14/50
	 Training: Loss: 0.5826, Accuracy: 82.8700%, 
	 Validation: Loss: 0.6086, Accuracy: 82.6100%, Time: 607.807s
	 Best Accuracy for validation : 0.8261 at epoch 014
Epoch: 15/50
	 Training: Loss: 0.5814, Accuracy: 82.9350%, 
	 Validation: Loss: 0.6105, Accuracy: 82.3100%, Time: 614.727s
	 Best Accuracy for validation : 0.8261 at epoch 014
Epoch: 16/50
	 Training: Loss: 0.5743, Accuracy: 83.2600%, 
	 Validation: Loss: 0.6109, Accuracy: 82.8700%, Time: 609.623s
	 Best Accuracy for validation : 0.8287 at epoch 016
Epoch: 17/50
	 Training: Loss: 0.5740, Accuracy: 83.2433%, 
	 Validation: Loss: 0.6122, Accuracy: 82.7100%, Time: 615.076s
	 Best Accuracy for validation : 0.8287 at epoch 016
Epoch: 18/50
	 Training: Loss: 0.5691, Accuracy: 83.4367%, 
	 Validation: Loss: 0.6257, Accuracy: 82.3700%, Time: 612.821s
	 Best Accuracy for validation : 0.8287 at epoch 016
Epoch: 19/50
	 Training: Loss: 0.5698, Accuracy: 83.5633%, 
	 Validation: Loss: 0.5976, Accuracy: 83.3600%, Time: 612.744s
	 Best Accuracy for validation : 0.8336 at epoch 019
Epoch: 20/50
	 Training: Loss: 0.5672, Accuracy: 83.4583%, 
	 Validation: Loss: 0.5923, Accuracy: 83.7300%, Time: 608.514s
	 Best Accuracy for validation : 0.8373 at epoch 020
Epoch: 21/50
	 Training: Loss: 0.5664, Accuracy: 83.6250%, 
	 Validation: Loss: 0.5903, Accuracy: 83.4800%, Time: 616.796s
	 Best Accuracy for validation : 0.8373 at epoch 020
Epoch: 22/50
	 Training: Loss: 0.5647, Accuracy: 83.7300%, 
	 Validation: Loss: 0.6010, Accuracy: 83.0700%, Time: 620.391s
	 Best Accuracy for validation : 0.8373 at epoch 020
Epoch: 23/50
	 Training: Loss: 0.5647, Accuracy: 83.7650%, 
	 Validation: Loss: 0.6070, Accuracy: 83.0500%, Time: 611.572s
	 Best Accuracy for validation : 0.8373 at epoch 020
Epoch: 24/50
	 Training: Loss: 0.5643, Accuracy: 83.7450%, 
	 Validation: Loss: 0.5994, Accuracy: 83.0500%, Time: 616.740s
	 Best Accuracy for validation : 0.8373 at epoch 020
Epoch: 25/50
	 Training: Loss: 0.5608, Accuracy: 83.8667%, 
	 Validation: Loss: 0.5872, Accuracy: 83.9300%, Time: 574.860s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 26/50
	 Training: Loss: 0.5619, Accuracy: 83.8317%, 
	 Validation: Loss: 0.5995, Accuracy: 83.4800%, Time: 580.807s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 27/50
	 Training: Loss: 0.5597, Accuracy: 83.9667%, 
	 Validation: Loss: 0.5981, Accuracy: 83.9100%, Time: 573.785s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 28/50
	 Training: Loss: 0.5609, Accuracy: 83.9600%, 
	 Validation: Loss: 0.5985, Accuracy: 83.0100%, Time: 581.108s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 29/50
	 Training: Loss: 0.5582, Accuracy: 84.0417%, 
	 Validation: Loss: 0.6095, Accuracy: 83.1200%, Time: 555.763s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 30/50
	 Training: Loss: 0.5573, Accuracy: 84.0017%, 
	 Validation: Loss: 0.6013, Accuracy: 83.2900%, Time: 553.698s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 31/50
	 Training: Loss: 0.5597, Accuracy: 83.9700%, 
	 Validation: Loss: 0.6226, Accuracy: 82.9000%, Time: 549.435s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 32/50
	 Training: Loss: 0.5583, Accuracy: 83.9933%, 
	 Validation: Loss: 0.6041, Accuracy: 83.4200%, Time: 546.375s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 33/50
	 Training: Loss: 0.5558, Accuracy: 84.0100%, 
	 Validation: Loss: 0.6008, Accuracy: 83.5900%, Time: 546.361s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 34/50
	 Training: Loss: 0.5553, Accuracy: 84.0717%, 
	 Validation: Loss: 0.6344, Accuracy: 82.0200%, Time: 545.937s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 35/50
	 Training: Loss: 0.5572, Accuracy: 83.9150%, 
	 Validation: Loss: 0.5921, Accuracy: 83.7000%, Time: 546.308s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 36/50
	 Training: Loss: 0.5554, Accuracy: 84.1750%, 
	 Validation: Loss: 0.5932, Accuracy: 83.5300%, Time: 546.791s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 37/50
	 Training: Loss: 0.5566, Accuracy: 84.0600%, 
	 Validation: Loss: 0.5954, Accuracy: 83.0300%, Time: 546.066s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 38/50
	 Training: Loss: 0.5540, Accuracy: 84.1700%, 
	 Validation: Loss: 0.5945, Accuracy: 83.8800%, Time: 545.714s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 39/50
	 Training: Loss: 0.5549, Accuracy: 84.2417%, 
	 Validation: Loss: 0.6054, Accuracy: 82.8800%, Time: 544.491s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 40/50
	 Training: Loss: 0.5560, Accuracy: 83.9600%, 
	 Validation: Loss: 0.6189, Accuracy: 82.7500%, Time: 541.625s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 41/50
	 Training: Loss: 0.5546, Accuracy: 84.1667%, 
	 Validation: Loss: 0.6133, Accuracy: 83.2700%, Time: 553.133s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 42/50
	 Training: Loss: 0.5548, Accuracy: 84.0717%, 
	 Validation: Loss: 0.5927, Accuracy: 83.4300%, Time: 576.668s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 43/50
	 Training: Loss: 0.5531, Accuracy: 84.1867%, 
	 Validation: Loss: 0.6155, Accuracy: 82.1500%, Time: 615.985s
	 Best Accuracy for validation : 0.8393 at epoch 025
Epoch: 44/50
	 Training: Loss: 0.5525, Accuracy: 84.2217%, 
	 Validation: Loss: 0.5902, Accuracy: 83.9600%, Time: 616.727s
	 Best Accuracy for validation : 0.8396 at epoch 044
Epoch: 45/50
	 Training: Loss: 0.5525, Accuracy: 84.1167%, 
	 Validation: Loss: 0.5843, Accuracy: 84.0900%, Time: 618.055s
	 Best Accuracy for validation : 0.8409 at epoch 045
Epoch: 46/50
	 Training: Loss: 0.5532, Accuracy: 84.2133%, 
	 Validation: Loss: 0.6115, Accuracy: 82.8800%, Time: 616.387s
	 Best Accuracy for validation : 0.8409 at epoch 045
Epoch: 47/50
	 Training: Loss: 0.5529, Accuracy: 84.0767%, 
	 Validation: Loss: 0.6046, Accuracy: 83.3400%, Time: 613.826s
	 Best Accuracy for validation : 0.8409 at epoch 045
Epoch: 48/50
	 Training: Loss: 0.5507, Accuracy: 84.3400%, 
	 Validation: Loss: 0.6280, Accuracy: 82.3800%, Time: 614.894s
	 Best Accuracy for validation : 0.8409 at epoch 045
Epoch: 49/50
	 Training: Loss: 0.5494, Accuracy: 84.2983%, 
	 Validation: Loss: 0.6100, Accuracy: 82.1000%, Time: 594.077s
	 Best Accuracy for validation : 0.8409 at epoch 045
Epoch: 50/50
	 Training: Loss: 0.5510, Accuracy: 84.3800%, 
	 Validation: Loss: 0.5850, Accuracy: 84.2000%, Time: 553.280s
	 Best Accuracy for validation : 0.8420 at epoch 050
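The y_true and y_pred lists collected during validation are never consumed inside train_model. A natural follow-up, sketched below under the assumption that train_model is modified to end with return y_true, y_pred, is to count the most frequent confusions of the final epoch:

# Hypothetical: assumes train_model ends with `return y_true, y_pred`
from collections import Counter

y_true, y_pred = train_model(model, criterion, optimizer, num_epochs)
confusions = Counter((t, p) for t, p in zip(y_true, y_pred) if t != p)
print("Most common confusions (true, predicted):", confusions.most_common(5))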

Classical training results, for reference:

Training started:
Phase: train Epoch: 1/50 Loss: 1.7329 Acc: 0.3983        
Phase: train Epoch: 2/50 Loss: 1.1598 Acc: 0.5933        
Phase: train Epoch: 3/50 Loss: 0.9086 Acc: 0.7049        
Phase: train Epoch: 4/50 Loss: 0.7694 Acc: 0.7510        
Phase: train Epoch: 5/50 Loss: 0.6658 Acc: 0.7857        
Phase: train Epoch: 6/50 Loss: 0.6100 Acc: 0.8001        
Phase: train Epoch: 7/50 Loss: 0.5628 Acc: 0.8229        
Phase: train Epoch: 8/50 Loss: 0.5416 Acc: 0.8289        
Phase: train Epoch: 9/50 Loss: 0.5088 Acc: 0.8348        
Phase: train Epoch: 10/50 Loss: 0.4896 Acc: 0.8423        
Phase: train Epoch: 11/50 Loss: 0.4722 Acc: 0.8447        
Phase: train Epoch: 12/50 Loss: 0.4476 Acc: 0.8502        
Phase: train Epoch: 13/50 Loss: 0.4314 Acc: 0.8576        
Phase: train Epoch: 14/50 Loss: 0.4223 Acc: 0.8606        
Phase: train Epoch: 15/50 Loss: 0.4111 Acc: 0.8636        
Phase: train Epoch: 16/50 Loss: 0.3913 Acc: 0.8686        
Phase: train Epoch: 17/50 Loss: 0.3753 Acc: 0.8770        
Phase: train Epoch: 18/50 Loss: 0.3662 Acc: 0.8800        
Phase: train Epoch: 19/50 Loss: 0.3604 Acc: 0.8755        
Phase: train Epoch: 20/50 Loss: 0.3464 Acc: 0.8854        
Phase: train Epoch: 21/50 Loss: 0.3391 Acc: 0.8844        
Phase: train Epoch: 22/50 Loss: 0.3262 Acc: 0.8938        
Phase: train Epoch: 23/50 Loss: 0.3265 Acc: 0.8874        
Phase: train Epoch: 24/50 Loss: 0.3079 Acc: 0.9058        
Phase: train Epoch: 25/50 Loss: 0.3049 Acc: 0.9008        
Phase: train Epoch: 26/50 Loss: 0.2941 Acc: 0.9028        
Phase: train Epoch: 27/50 Loss: 0.2876 Acc: 0.9082        
Phase: train Epoch: 28/50 Loss: 0.2831 Acc: 0.9043        
Phase: train Epoch: 29/50 Loss: 0.2684 Acc: 0.9072        
Phase: train Epoch: 30/50 Loss: 0.2616 Acc: 0.9152        
Phase: train Epoch: 31/50 Loss: 0.2676 Acc: 0.9072        
Phase: train Epoch: 32/50 Loss: 0.2544 Acc: 0.9092        
Phase: train Epoch: 33/50 Loss: 0.2480 Acc: 0.9147        
Phase: train Epoch: 34/50 Loss: 0.2429 Acc: 0.9216        
Phase: train Epoch: 35/50 Loss: 0.2351 Acc: 0.9187        
Phase: train Epoch: 36/50 Loss: 0.2238 Acc: 0.9231        
Phase: train Epoch: 37/50 Loss: 0.2375 Acc: 0.9191        
Phase: train Epoch: 38/50 Loss: 0.2153 Acc: 0.9286        
Phase: train Epoch: 39/50 Loss: 0.2098 Acc: 0.9276        
Phase: train Epoch: 40/50 Loss: 0.2016 Acc: 0.9330        
Phase: train Epoch: 41/50 Loss: 0.1988 Acc: 0.9330        
Phase: train Epoch: 42/50 Loss: 0.2062 Acc: 0.9271        
Phase: train Epoch: 43/50 Loss: 0.1990 Acc: 0.9301        
Phase: train Epoch: 44/50 Loss: 0.1907 Acc: 0.9320        
Phase: train Epoch: 45/50 Loss: 0.1798 Acc: 0.9375        
Phase: train Epoch: 46/50 Loss: 0.1777 Acc: 0.9325        
Phase: train Epoch: 47/50 Loss: 0.1770 Acc: 0.9365        
Phase: train Epoch: 48/50 Loss: 0.1668 Acc: 0.9390        
Phase: train Epoch: 49/50 Loss: 0.1683 Acc: 0.9395        
Phase: train Epoch: 50/50 Loss: 0.1582 Acc: 0.9449        
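The classical baseline itself is not listed in the post. For reference, a plausible counterpart (an assumption, not the author's code) keeps the same 784 → 4 → 10 layout but swaps the 4-qubit circuit for a classical nonlinearity:

import torch.nn as nn

class ClassicalNet(nn.Module):
    """Hypothetical classical counterpart of DressedQuantumNet:
    the 4-qubit variational circuit is replaced by a tanh layer."""

    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(784, 4),
            nn.Tanh(),
            nn.Linear(4, 10),
        )

    def forward(self, x):
        return self.net(x)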