PyTorch 1.1.0 / Python 3.6 / CUDA 9.0: neural network visualization issues

---- Update ----

PyTorch 1.1.0

must be paired with pip install hiddenlayer==0.2

otherwise you get the error:

AttributeError: module 'torch.jit' has no attribute 'get_trace_graph'
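A quick sanity check of the environment (a minimal sketch; pkg_resources ships with setuptools):

import torch
import pkg_resources

# With PyTorch 1.1.0, hiddenlayer has to stay at 0.2,
# otherwise hl.build_graph fails with the AttributeError above.
print(torch.__version__)                                      # expect 1.1.0
print(pkg_resources.get_distribution("hiddenlayer").version)  # expect 0.2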


Contents

pytorch-cnn-visualizations

A-journey-into-Convolutional-Neural-Network-visualization-

PyTorchViz

hiddenlayer 0.4.1

onnx 0.4.1

tensorwatch 

VisualDL


tensorboardX

https://blog.csdn.net/sz793919425/article/details/84305669

https://www.jianshu.com/p/46eb3004beca

http://blog.itpub.net/29829936/viewspace-2637425/
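A minimal sketch of the tensorboardX pattern used in the training script further down (the tag name and the dummy loss are just placeholders):

import math
from tensorboardX import SummaryWriter

writer = SummaryWriter('log')          # event files go to ./log; view with: tensorboard --logdir log
for step in range(100):
    dummy_loss = math.exp(-step / 20)  # placeholder standing in for the real training loss
    writer.add_scalar('Train/Loss', dummy_loss, step)
writer.close()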


 VisualDL

https://github.com/PaddlePaddle/VisualDL/blob/develop/demo/pytorch/pytorch_cifar10.py

https://blog.csdn.net/cxiazaiyu/article/details/91129657

Bug when converting a PyTorch model to ONNX with PyTorch 1.1

https://www.cnblogs.com/wizardforcel/p/10350623.html

import torch
import torchvision

dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
model = torchvision.models.alexnet(pretrained=True).cuda()

# You can set display names for the inputs and outputs of the graph. These names do not change the semantics of the graph; they only make it more readable.
# The inputs to the network consist of the flat list of inputs (i.e. the values passed to forward()) followed by the flat list of parameters. You can specify only some of the names, e.g. pass a list shorter than the number of inputs, and names will be assigned starting from the beginning.
input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
output_names = [ "output1" ]

torch.onnx.export(model, dummy_input, "alexnet.onnx", verbose=True, input_names=input_names, output_names=output_names)

The resulting alexnet.onnx is a binary protobuf file that contains both the network structure and the parameters of the exported model (here, AlexNet). The key argument verbose=True makes the exporter print a human-readable representation of the network:

# These are the inputs and parameters to the network, carrying the names we assigned above.
graph(%actual_input_1 : Float(10, 3, 224, 224)
      %learned_0 : Float(64, 3, 11, 11)
      %learned_1 : Float(64)
      %learned_2 : Float(192, 64, 5, 5)
      %learned_3 : Float(192)
      # ---- omitted for brevity ----
      %learned_14 : Float(1000, 4096)
      %learned_15 : Float(1000)) {
  # Every statement consists of some output tensors (and their types), the operator to be run (with its attributes, e.g. kernels, strides, etc.), and its input tensors (%actual_input_1, %learned_0, %learned_1)
  %17 : Float(10, 64, 55, 55) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[11, 11], pads=[2, 2, 2, 2], strides=[4, 4]](%actual_input_1, %learned_0, %learned_1), scope: AlexNet/Sequential[features]/Conv2d[0]
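To double-check the exported file, the onnx package can load and validate it; a minimal sketch (assuming onnx is installed and alexnet.onnx was written by the code above):

import onnx

# Load the protobuf produced by torch.onnx.export and run the ONNX checker on it.
onnx_model = onnx.load("alexnet.onnx")
onnx.checker.check_model(onnx_model)
# Print a human-readable summary of the graph (similar to the verbose output above).
print(onnx.helper.printable_graph(onnx_model.graph))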

My code:

from torchsummary import summary
from torchstat import stat
from tensorboardX import SummaryWriter
writer = SummaryWriter('log')

from torchviz import make_dot, make_dot_from_trace
import hiddenlayer as hl
from hiddenlayer import transforms as ht
import os



model = EfficientNet.from_pretrained('efficientnet-b0')#.to(device)#.cuda()

#dummy_input = torch.rand(1, 3, 224, 224).requires_grad_(True)
#writer.add_graph(model, (dummy_input,))
#vis_graph = make_dot(model(dummy_input), params=dict(model.named_parameters()))
#vis_graph = make_dot(model(dummy_input), params=dict(list(model.named_parameters()) + [('x', dummy_input)]))
#vis_graph.view()

print(model)
model.to(device)
#summary(model, (3, 224, 224))
#model.cpu()
#stat(model,  (3, 224, 224))
#model.to(device)
print("-------------------------------------------")

#-*-coding:utf-8-*-
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, models
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch import utils
from MyDataset import MyDataset
from torchsummary import summary
from torchstat import stat
from tensorboardX import SummaryWriter
writer = SummaryWriter('log')

from torchviz import make_dot, make_dot_from_trace
import hiddenlayer as hl
from hiddenlayer import transforms as ht
import os
import torch.onnx

import tensorwatch as tw


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    #for batch_idx, (data, target) in enumerate(train_loader):
    for batch_idx, data_ynh in enumerate(train_loader):
        # unpack the images and labels
        data, target = data_ynh
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        output1 = torch.nn.functional.log_softmax(output, dim=1)
        loss = F.nll_loss(output1, target)
        #loss = F.l1_loss(output, target)
        loss.backward()
        optimizer.step()

        #new ynh
        # plot one point every 10 batches for the loss curve
        if batch_idx % 10 == 0:
            niter = epoch * len(train_loader) + batch_idx
            writer.add_scalar('Train/Loss', loss.data, niter)

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def test(args, model, device, test_loader, epoch):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        #for data, target in test_loader:
        for data_ynh in test_loader:
            # unpack the images and labels
            data, target = data_ynh
            data, target = data.to(device), target.to(device)
            output = model(data)
            output1 = torch.nn.functional.log_softmax(output, dim=1)
            test_loss += F.nll_loss(output1, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    # new ynh
    writer.add_scalar('Test/Accu', 100. * correct / len(test_loader.dataset), epoch)  # log accuracy, not loss


    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=10, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=10, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # -------------------------------------------- step 1/5: load the data -------------------------------------------
    train_txt_path = './Data/train.txt'
    valid_txt_path = './Data/valid.txt'
    # data preprocessing settings
    #normMean = [0.4948052, 0.48568845, 0.44682974]
    #normStd = [0.24580306, 0.24236229, 0.2603115]
    normMean = [104, 117, 123]
    normStd = [1, 1, 1]
    normTransform = transforms.Normalize(normMean, normStd)
    trainTransform = transforms.Compose([
        transforms.Resize(224),
        #transforms.RandomCrop(224, padding=4),
        transforms.ToTensor(),
        #normTransform
    ])

    validTransform = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        #normTransform
    ])

    # Build MyDataset instances; img_path is an optional prefix prepended to the image paths listed in the txt file
    train_data = MyDataset(img_path = '', txt_path=train_txt_path, transform=trainTransform)
    valid_data = MyDataset(img_path = '', txt_path=valid_txt_path, transform=validTransform)

    # Build the DataLoaders
    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=16, shuffle=True, num_workers=2)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_data, batch_size=16, num_workers=2)




    #blocks_args, global_params = utils.get_model_params('efficientnet-b0', override_params=None)
    #model = EfficientNet(blocks_args, global_params)
    model = EfficientNet.from_pretrained('efficientnet-b0').to(device)#.cuda()


    #dummy_input = torch.rand(1, 3, 224, 224).requires_grad_(True)
    #writer.add_graph(model, (dummy_input,))

    # no cuda()
    #vis_graph = make_dot(model(dummy_input), params=dict(model.named_parameters()))
    #vis_graph = make_dot(model(dummy_input), params=dict(list(model.named_parameters()) + [('x', dummy_input)]))
    #vis_graph.view()

    #no cuda()
    #input = torch.zeros([1, 3, 224, 224])
    #hl_graph = hl.build_graph(model, input)
    #hl_graph.theme = hl.graph.THEMES["blue"].copy()
    #hl_graph.save(os.path.join("/home/boyun/PycharmProjects/EfficientNet-1852", "pytorch_resnet_bloks.pdf"))



    # visualize with the netron tool (it reads the ONNX file)
    #https://discuss.pytorch.org/t/onnx-export-failed-couldnt-export-operator-aten-adaptive-avg-pool1d/30204
    #https://ptorch.com/news/95.html
    #model.train(False)
    #dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
    #torch_out = torch.onnx._export(model, dummy_input, "./efficientnet-b0.onnx", export_params=True, verbose=True)


    #no .cuda()
    #tw_graph = tw.draw_model(model, [1, 3, 224, 224])

    #print(model)
    #model.to(device)
    #summary(model, (3, 224, 224))
    #model.cpu()
    #stat(model,  (3, 224, 224))
    print("-------------------------------------------")



    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, valid_loader, epoch)

    if (args.save_model):
        torch.save(model.state_dict(), "mnist_cnn.pt")

    writer.close()


if __name__ == '__main__':
    main()

Previously the graph was rendered left-to-right; the code below switches it to a top-down layout:

    import hiddenlayer as hl
    from hiddenlayer import transforms as ht

    # no cuda()
    input = torch.zeros([1, 3, 224, 224], device='cuda')
    hl_graph = hl.build_graph(model, input)
    hl_graph.theme = hl.graph.THEMES["blue"].copy()
    #hl_graph.save("/home/boyun/PycharmProjects/EfficientNet-PyTorch/pytorch_resnet_bloks.pdf")
    dot = hl_graph.build_dot()
    dot.attr("graph", rankdir="TD")  # Topdown
    dot.format = "pdf"
    directory, file_name = "/home/boyun/PycharmProjects/EfficientNet-PyTorch/","pytorch_resnet_bloks.pdf"
    # Remove extension from file name. dot.render() adds it.
    file_name = file_name.replace("." + "pdf", "")
    dot.render(file_name, directory=directory, cleanup=True)
    from pytorchcv.model_provider import get_model as ptcv_get_model  # pytorchcv model zoo
    model = ptcv_get_model("fcn8sd_resnetd50b_voc", pretrained=False).cuda()
    dummy_input = torch.randn(1, 3, 224, 224, device='cuda')
    torch.onnx.export(model, dummy_input, "./fcn8sd_resnetd50b_voc.onnx", verbose=True)

    #stat(model, (3, 224, 224))
    #model.to(device)
    #summary(model, (3, 224, 224))

    import hiddenlayer as hl
    from hiddenlayer import transforms as ht

    # no cuda()
    input = torch.zeros([1, 3, 224, 224], device='cuda')
    hl_graph = hl.build_graph(model, input)
    hl_graph.theme = hl.graph.THEMES["blue"].copy()
    #hl_graph.save("/home/boyun/PycharmProjects/EfficientNet-PyTorch/pytorch_resnet_bloks.pdf")
    dot = hl_graph.build_dot()
    dot.attr("graph", rankdir="TD")  # Topdown
    dot.format = "pdf"
    directory, file_name = "/home/boyun/PycharmProjects/EfficientNet-PyTorch/","pytorch_resnet_bloks.pdf"
    # Remove extension from file name. dot.render() adds it.
    file_name = file_name.replace("." + "pdf", "")
    dot.render(file_name, directory=directory, cleanup=True)

 

pytorch-cnn-visualizations

https://github.com/utkuozbulak/pytorch-cnn-visualizations

Gradient Visualization
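The repository covers many gradient-based methods; as a point of reference, the simplest of them, a vanilla-backprop saliency map, can be sketched like this (my own minimal sketch, not the repository's code; AlexNet is just an example model):

import torch
from torchvision import models

model = models.alexnet(pretrained=True).eval()

# Dummy input standing in for a preprocessed image batch of shape (1, 3, 224, 224).
img = torch.randn(1, 3, 224, 224, requires_grad=True)

# Backpropagate the score of the predicted class down to the input pixels.
scores = model(img)
scores[0, scores.argmax()].backward()

# The per-pixel gradient magnitude is the saliency map.
saliency = img.grad.abs().max(dim=1)[0]   # shape (1, 224, 224)
print(saliency.shape)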

A-journey-into-Convolutional-Neural-Network-visualization-

https://github.com/FrancescoSaverioZuppichini/A-journey-into-Convolutional-Neural-Network-visualization-

PyTorchViz

sudo apt-get install graphviz

pip install torchviz

or:

sudo apt-get install graphviz

pip install git+https://github.com/szagoruyko/pytorchviz

from torchviz import make_dot, make_dot_from_trace
import torch
from torchvision import models

model = models.resnet50(pretrained=False)  # any nn.Module works here
dummy_input = torch.rand(1, 3, 224, 224).requires_grad_(True)
#vis_graph = make_dot(model(dummy_input), params=dict(model.named_parameters()))
vis_graph = make_dot(model(dummy_input), params=dict(list(model.named_parameters()) + [('x', dummy_input)]))
vis_graph.view()

hiddenlayer

https://github.com/waleedka/hiddenlayer

pip install git+https://github.com/waleedka/hiddenlayer.git
import os
import torch
import hiddenlayer as hl
from hiddenlayer import transforms as ht
from torchvision import models

input = torch.zeros([1, 3, 224, 224])
model1 = models.resnet50(pretrained=True)
hl_graph = hl.build_graph(model1, input)
hl_graph.theme = hl.graph.THEMES["blue"].copy()
hl_graph.save(os.path.join("/home/boyun/PycharmProjects/EfficientNet-1852", "pytorch_resnet_bloks.pdf"))

error:

Traceback (most recent call last):
  File "/home/boyun/PycharmProjects/EfficientNet-1852/train_new_io.py", line 186, in <module>
    main()
  File "/home/boyun/PycharmProjects/EfficientNet-1852/train_new_io.py", line 157, in main
    hl_graph = hl.build_graph(model, input)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/hiddenlayer/graph.py", line 143, in build_graph
    import_graph(g, model, args)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/hiddenlayer/pytorch_builder.py", line 71, in import_graph
    torch.onnx._optimize_trace(trace, torch.onnx.OperatorExportTypes.ONNX)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/__init__.py", line 40, in _optimize_trace
    trace.set_graph(utils._optimize_graph(trace.graph(), operator_export_type))
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/utils.py", line 188, in _optimize_graph
    graph = torch._C._jit_pass_onnx(graph, operator_export_type)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/__init__.py", line 50, in _run_symbolic_function
    return utils._run_symbolic_function(*args, **kwargs)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/utils.py", line 589, in _run_symbolic_function
    return fn(g, *inputs, **attrs)
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/symbolic.py", line 130, in wrapper
    args = [_parse_arg(arg, arg_desc) for arg, arg_desc in zip(args, arg_descriptors)]
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/symbolic.py", line 130, in <listcomp>
    args = [_parse_arg(arg, arg_desc) for arg, arg_desc in zip(args, arg_descriptors)]
  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/symbolic.py", line 90, in _parse_arg
    raise RuntimeError("Failed to export an ONNX attribute, "
RuntimeError: Failed to export an ONNX attribute, since it's not constant, please try to make things (e.g., kernel size) static if possible

Switching to a different version, torch 0.4.1, fixes this problem, but then the other visualization tools stop working.

onnx

Switched back to torch 0.4.1

https://github.com/onnx/onnx

https://oldpan.me/archives/talk-about-onnx

https://github.com/lutzroeder/Netron 

pip install netron

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, models



import torch.onnx
from efficientnet_pytorch import EfficientNet

device = torch.device("cuda")
model = EfficientNet.from_pretrained('efficientnet-b0').to(device)#.cuda()
# visualize with the netron tool (it reads the ONNX file)
model.train(False)
dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
torch_out = torch.onnx._export(model,  # model being run
                           dummy_input,  # model input (or a tuple for multiple inputs)
                           "efficientnet-b0.onnx",
                           # where to save the model (can be a file or file-like object)
                           export_params=True)  # store the trained parameter weights inside the model file
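Once the .onnx file exists, it can be inspected with Netron; launching it from Python (using the netron package installed above) is a one-liner:

import netron

# Starts a local web server and opens the exported model in the browser.
netron.start("efficientnet-b0.onnx")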

https://blog.csdn.net/rock4you/article/details/89045181

PyTorch to Caffe2: fixing the ONNX-to-Caffe2 conversion error ValueError: Don't know how to translate op Unsqueeze

https://blog.csdn.net/cxiazaiyu/article/details/91129657

RuntimeError: Failed to export an ONNX attribute, since it's not constant, please try to make things (e.g., kernel size) static if possible

https://github.com/onnx/onnx/blob/master/docs/Operators.md

Supported operators:

error: 

  File "/home/boyun/anaconda3/envs/pytorch1_1_0/lib/python3.6/site-packages/torch/onnx/utils.py", line 232, in _export
    proto, export_map = graph.export(params, _onnx_opset_version, defer_weight_export, operator_export_type)
RuntimeError: ONNX export failed: Couldn't export operator aten::adaptive_avg

Because my model contains some very new layers that the exporter cannot recognize, I had to change a few things to make the export work.
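I do not reproduce my exact edits here, but the usual workaround for the aten::adaptive_avg_pool failure is to swap the adaptive pooling layer for a fixed-size one once the input resolution is known. A sketch for efficientnet_pytorch (assuming a 224x224 input, for which EfficientNet-b0's final feature map is 7x7; the _avg_pooling attribute name is specific to that package):

import torch.nn as nn

# Replace AdaptiveAvgPool2d(1) with a fixed-kernel AvgPool2d that the ONNX exporter understands.
# For EfficientNet-b0 with 224x224 inputs the last feature map is 7x7, hence kernel_size=7.
model._avg_pooling = nn.AvgPool2d(kernel_size=7)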

tensorwatch 

https://github.com/microsoft/tensorwatch

https://zhuanlan.zhihu.com/p/68613993

This visualization tool builds on top of torchvision.

pip install graphviz -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install torchvision -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install tensorwatch -i https://pypi.tuna.tsinghua.edu.cn/simple
import sys
import torch
import torchvision.models


import tensorwatch as tw
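With those imports in place, drawing a model graph is a single call (the same tw.draw_model call that appears commented out in my training code; resnet50 here is just an example):

import tensorwatch as tw
import torchvision.models as models

model = models.resnet50(pretrained=False)

# draw_model traces the model with the given input shape; the returned drawing
# renders inline in a Jupyter notebook.
tw_graph = tw.draw_model(model, [1, 3, 224, 224])

For reference, the environment where I ran this (pip list):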
(pytorch1_1_0) boyun@boyun:~/software/ubuntu-wine$ pip list
Package              Version 
-------------------- --------
absl-py              0.7.1   
astor                0.8.0   
attrs                19.1.0  
backcall             0.1.0   
bleach               3.1.0   
certifi              2019.3.9
chardet              3.0.4   
crc32c               1.7     
cycler               0.10.0  
decorator            4.4.0   
defusedxml           0.6.0   
entrypoints          0.3     
gast                 0.2.2   
graphviz             0.11    
grpcio               1.21.1  
h5py                 2.9.0   
hiddenlayer          0.2     
idna                 2.8     
imageio              2.5.0   
ipykernel            5.1.1   
ipython              7.5.0   
ipython-genutils     0.2.0   
ipywidgets           7.4.2   
jedi                 0.13.3  
Jinja2               2.10.1  
joblib               0.13.2  
jsonschema           3.0.1   
jupyter              1.0.0   
jupyter-client       5.2.4   
jupyter-console      6.0.0   
jupyter-core         4.4.0   
Keras-Applications   1.0.8   
Keras-Preprocessing  1.1.0   
kiwisolver           1.1.0   
Markdown             3.1.1   
MarkupSafe           1.1.1   
matplotlib           3.1.0   
mistune              0.8.4   
mock                 3.0.5   
nbconvert            5.5.0   
nbformat             4.4.0   
networkx             2.3     
notebook             5.7.8   
numpy                1.16.4  
onnx                 1.5.0   
opencv-python        4.1.0.25
pandas               0.24.2  
pandocfilters        1.4.2   
parso                0.4.0   
pexpect              4.7.0   
pickleshare          0.7.5   
Pillow               6.0.0   
pip                  19.1.1  
plotly               3.10.0  
prometheus-client    0.6.0   
prompt-toolkit       2.0.9   
protobuf             3.8.0   
ptyprocess           0.6.0   
Pygments             2.4.2   
pyparsing            2.4.0   
pyrsistent           0.14.11 
python-dateutil      2.8.0   
pytz                 2019.1  
PyWavelets           1.0.3   
pyzmq                18.0.0  
qtconsole            4.5.1   
requests             2.22.0  
retrying             1.3.3   
scikit-image         0.15.0  
scikit-learn         0.21.2  
scipy                1.3.0   
seaborn              0.9.0   
Send2Trash           1.5.0   
setuptools           41.0.1  
six                  1.12.0  
sklearn              0.0     
tensorboard          1.13.1  
tensorboardX         1.7     
tensorflow           1.13.1  
tensorflow-estimator 1.13.0  
tensorwatch          0.8.5   
termcolor            1.1.0   
terminado            0.8.2   
testpath             0.4.2   
torch                0.4.1   
torchstat            0.0.7   
torchsummary         1.5.1   
torchvision          0.2.1   
torchviz             0.0.1   
tornado              6.0.2   
traitlets            4.3.2   
typing               3.6.6   
typing-extensions    3.7.2   
urllib3              1.25.3  
wcwidth              0.1.7   
webencodings         0.5.1   
Werkzeug             0.15.4  
wheel                0.33.4  
widgetsnbextension   3.4.2  

My code in this environment:

    #blocks_args, global_params = utils.get_model_params('efficientnet-b0', override_params=None)
    #model = EfficientNet(blocks_args, global_params)
    model = EfficientNet.from_pretrained('efficientnet-b0').to(device)#.cuda()


    dummy_input = torch.rand(1, 3, 224, 224).requires_grad_(True)
    #writer.add_graph(model, (dummy_input,))

    # no cuda()
    #vis_graph = make_dot(model(dummy_input), params=dict(model.named_parameters()))
    #vis_graph = make_dot(model(dummy_input), params=dict(list(model.named_parameters()) + [('x', dummy_input)]))
    #vis_graph.view()

    #no cuda()
    #input = torch.zeros([1, 3, 224, 224])
    #hl_graph = hl.build_graph(model, input)
    #hl_graph.theme = hl.graph.THEMES["blue"].copy()
    #hl_graph.save(os.path.join("/home/boyun/PycharmProjects/EfficientNet-1852", "pytorch_resnet_bloks.pdf"))



    # visualize with the netron tool (it reads the ONNX file)
    #model.train(False)
    #dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
    #torch_out = torch.onnx._export(model, dummy_input, "./efficientnet-b0.onnx", export_params=True)


    #no .cuda()
    #tw_graph = tw.draw_model(model, [1, 3, 224, 224])

    #print(model)
    #model.to(device)
    #summary(model, (3, 224, 224))
    #model.cpu()
    #stat(model,  (3, 224, 224))
    print("-------------------------------------------")

 
