YOLO: Converting a Darknet .weights Model to a Keras .h5 Model

Preface

YOLO implementations exist in several frameworks. Model-compression work, for instance, is usually done directly on Darknet-53, while computing metrics such as mAP may require a framework like Keras. In those cases, the .weights model produced by training has to be converted to .h5. This article provides the conversion code.

Required materials

1. The network definition of the model to be converted, usually a .cfg file;
2. The .weights file of the model to be converted;
3. The conversion script convert.py;
4. The corresponding Python packages (see the example below).
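
For reference, here is a package set that matches the imports in the script below. This is only a hedged suggestion: the version pins are illustrative, the script targets the multi-backend Keras 2.x / TensorFlow 1.x era (it imports keras.layers.normalization and keras.layers.advanced_activations, which were removed in later releases), and pydot plus Graphviz are only needed for the optional --plot_model flag.

pip install numpy "keras==2.2.4" "tensorflow==1.15.*" pydot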

Conversion code

import argparse
import configparser
import io
import os
from collections import defaultdict

import numpy as np
from keras import backend as K
from keras.layers import (Conv2D, Input, ZeroPadding2D, Add,
                          UpSampling2D, MaxPooling2D, Concatenate)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from keras.utils.vis_utils import plot_model as plot


parser = argparse.ArgumentParser(description='Darknet To Keras Converter.')
parser.add_argument('config_path', help='Path to Darknet cfg file.')
parser.add_argument('weights_path', help='Path to Darknet weights file.')
parser.add_argument('output_path', help='Path to output Keras model file.')
parser.add_argument(
    '-p',
    '--plot_model',
    help='Plot generated Keras model and save as image.',
    action='store_true')
parser.add_argument(
    '-w',
    '--weights_only',
    help='Save as Keras weights file instead of model file.',
    action='store_true')

def unique_config_sections(config_file):
    """Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compatibility with configparser.
    """
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream

# %%
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
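    # The .weights file begins with a header: three int32 values
    # (major, minor, revision), followed by the number of images seen
    # during training (int64 for format versions >= 0.2, int32 otherwise).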
    major, minor, revision = np.ndarray(
        shape=(3, ), dtype='int32', buffer=weights_file.read(12))
    if (major*10+minor)>=2 and major<1000 and minor<1000:
        seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)

            print('conv2d', 'bn'
                  if batch_normalize else '  ', activation, weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride>1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1,0),(1,0)))(prev_layer)
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
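            # A [yolo] section marks a detection head: record the previous
            # layer as a model output and append None as a placeholder,
            # since the [yolo] layer itself carries no weights to load.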
            out_index.append(len(all_layers)-1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif section.startswith('net'):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    if len(out_index)==0: out_index.append(len(all_layers)-1)
    model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])
    print(model.summary())
    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))

    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))


if __name__ == '__main__':
    _main(parser.parse_args())

Usage example

For example, suppose the .weights file produced by model compression needs to be converted into a .h5 file that Keras can use. Assuming the pruned and compressed model definition is yolov3.cfg and its weights file is yolov3.weights, convert it to yolov3.h5 by running:

python convert.py yolov3.cfg yolov3.weights model_data/yolov3.h5

The three positional arguments are, in order, the model configuration file to convert, the weights file, and the output path.
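
As a quick sanity check, the converted file can be loaded back with Keras. This is a minimal sketch, assuming convert.py was run without --weights_only (so a full model file was saved) and using the output path from the command above:

# Minimal sanity check: reload the converted model and print its structure.
from keras.models import load_model

model = load_model('model_data/yolov3.h5')
model.summary()  # for a standard YOLOv3 cfg this ends with three output layers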

Converting .weights to .pt format

Similarly, a .weights file can be converted to PyTorch's .pt format with the convert function from the ultralytics/yolov3 repository's models.py. Note that convert infers the conversion direction from the extension of its weights argument: passing a .weights file produces a .pt checkpoint, while passing a .pt file performs the reverse conversion. To go from .weights to .pt, run the following from the repository root:

python -c "from models import *; convert('cfg/yolov3.cfg', 'weights/last.weights')"

References

https://github.com/huangbinz/yolov3-weights2h5
