The Implementation of malloc and free in glibc (Part 4): The tcache Mechanism

I. Overview

The tcache is a new mechanism introduced in glibc 2.26, whose purpose is to improve the performance of heap management.

In fact, when we debug heap-related programs with pwndbg today, we can see that many freed chunks are placed into the tcache.

Many heap exploitation techniques also revolve around the tcache, so it is worth studying how it works.

Rather than first defining what the tcache is (literally, the name suggests a "thread cache"), we will start by analyzing the newly added tcache-related code; by the end we will be able to summarize what the tcache mechanism really is.

II. New Data Structures and Macros

0x00 Some macro constants

# define TCACHE_MAX_BINS                64
# define TCACHE_FILL_COUNT              7   /* default number of chunks cached per bin */

0x01 tcache management structures

/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  struct tcache_perthread_struct *key;
} tcache_entry;
/* There is one of these for each thread, which contains the
   per-thread cache (hence "tcache_perthread_struct").  Keeping
   overall size low is mildly important.  Note that COUNTS and ENTRIES
   are redundant (we could have just counted the linked list each
   time), this is for performance reasons.  */
typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

static __thread tcache_perthread_struct *tcache = NULL;

Note: the global variable tcache is defined here; it is thread-local, so each thread gets its own copy via __thread.
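
As an aside, we can compute how big this per-thread structure is. Below is a minimal sketch using a local replica of the definitions above (assuming a 64-bit platform and the char-based counts field shown here; glibc 2.30 later widened counts to uint16_t):

#include <stdio.h>

#define TCACHE_MAX_BINS 64

/* Local replicas of the structures above, only to compute their size.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  void *key;
} tcache_entry;

typedef struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

int main (void)
{
  /* 64 * 1 + 64 * 8 = 576 = 0x240 bytes of user data; as a chunk that is
     0x250 bytes, which is why a 0x250 chunk sits at the start of a fresh
     heap: tcache_init allocates this structure on the heap itself.  */
  printf ("sizeof (tcache_perthread_struct) = %#zx\n",
          sizeof (tcache_perthread_struct));
  return 0;
}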

0x02 malloc_par (now with tcache-related fields)

static struct malloc_par mp_ =
{
  .top_pad = DEFAULT_TOP_PAD,
  .n_mmaps_max = DEFAULT_MMAP_MAX,
  .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
  .trim_threshold = DEFAULT_TRIM_THRESHOLD,
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
  .arena_test = NARENAS_FROM_NCORES (1)
#if USE_TCACHE
  ,
  .tcache_count = TCACHE_FILL_COUNT,
  .tcache_bins = TCACHE_MAX_BINS,
  .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
  .tcache_unsorted_limit = 0 /* No limit.  */
#endif
};

0x03 Converting between sizes and tcache indices

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx)        (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))
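
To make these conversions concrete, here is a sketch that replicates the two macros with typical 64-bit constants (MALLOC_ALIGNMENT = 16, MINSIZE = 32, SIZE_SZ = 8 are assumptions for illustration):

#include <stdio.h>

#define SIZE_SZ          8
#define MALLOC_ALIGNMENT 16
#define MINSIZE          32

#define tidx2usize(idx)  (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
#define csize2tidx(x)    (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

int main (void)
{
  printf ("csize2tidx(0x20) = %zu\n", (size_t) csize2tidx (0x20));  /* 0 */
  printf ("csize2tidx(0x30) = %zu\n", (size_t) csize2tidx (0x30));  /* 1 */
  printf ("tidx2usize(63)   = %zu\n", tidx2usize (63));             /* 1032 */
  return 0;
}

So bin 0 holds 0x20-byte chunks, each following bin is 0x10 larger, and with 64 bins the tcache covers user requests up to 1032 bytes (chunk size 0x410).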

III. New Code in __libc_malloc

The new code is inserted right before the call to arena_get (ar_ptr, bytes);:

#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes;
  checked_request2size (bytes, tbytes);
  size_t tc_idx = csize2tidx (tbytes);
  MAYBE_INIT_TCACHE ();
  DIAG_PUSH_NEEDS_COMMENT;
  if (tc_idx < mp_.tcache_bins
      /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
      && tcache
      && tcache->entries[tc_idx] != NULL)
    {
      return tcache_get (tc_idx);
    }
  DIAG_POP_NEEDS_COMMENT;
#endif

//-=-=-=-=-=-=-= The code below is slightly changed from the old version, but functionally equivalent -=-=-=-=-=-=-=-
  if (SINGLE_THREAD_P)
    {
      victim = _int_malloc (&main_arena, bytes);
      assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
              &main_arena == arena_for_chunk (mem2chunk (victim)));
      return victim;
    }

As we can see, taking a chunk from the tcache has even higher priority than obtaining one through _int_malloc. Let us analyze how a chunk is fetched from the tcache:

First, the tcache bin index is computed from the actual request size; if the index is within the supported range and that bin is non-empty, a chunk is taken from it. The function that performs this step is tcache_get, defined as:

/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  */
static __always_inline void * tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  assert (tc_idx < TCACHE_MAX_BINS);
  assert (tcache->counts[tc_idx] > 0);
  tcache->entries[tc_idx] = e->next;
  --(tcache->counts[tc_idx]);
  e->key = NULL;
  return (void *) e;
}

As we can see, this returns the first block of memory pointed to by the corresponding tcache bin's entry.

That memory is typed as a tcache_entry, and tcache_entry is clearly a singly linked list node; from this we can infer that each tcache bin is a LIFO list.

Note that tcache_get does no processing of the memory taken from the tcache; it simply casts it and returns. We can therefore infer that a tcache_entry is overlaid on a malloc_chunk, with each tcache_entry pointer pointing directly at the address of what would be the chunk's fd field. The short demo below illustrates the LIFO order.
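
A minimal sketch of that behavior (assuming glibc ≥ 2.26, where the tcache exists; addresses will vary per run):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x48);
  void *b = malloc (0x48);
  free (a);
  free (b);                  /* tcache bin for 0x50-byte chunks: b -> a */
  void *c = malloc (0x48);   /* expect c == b: last freed, first out */
  void *d = malloc (0x48);   /* expect d == a */
  printf ("a=%p b=%p\nc=%p d=%p\n", a, b, c, d);
  return 0;
}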

IV. Re-analyzing _int_malloc

0x00 Introduction

With the introduction of the tcache mechanism, many parts of _int_malloc have been modified.

Here we walk through _int_malloc once more, focusing on the places that differ from the _int_malloc of older glibc versions and on the code related to the tcache mechanism.

For code whose behavior is essentially unchanged from older versions, we only quote it without further detailed analysis.

0x01 Variable definitions and initial checks

static void *_int_malloc (mstate av, size_t bytes) {
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */
    
#if USE_TCACHE
  size_t tcache_unsorted_count;            /* count of unsorted chunks processed */
#endif
  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size traps (returning 0) request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */
  checked_request2size (bytes, nb);
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }

Among the many variables defined here, the only addition compared with older versions is size_t tcache_unsorted_count.

0x02 Trying to take a chunk from a fastbin

  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */
#define REMOVE_FB(fb, victim, pp)             \
  do{                                         \
      victim = pp;                            \
      if (victim == NULL)                     \
        break;                                \
    }                                         \
  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) != victim); \

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);
      mchunkptr pp;
      victim = *fb;
      if (victim != NULL) {
          if (SINGLE_THREAD_P)
            *fb = victim->fd;
          else
            REMOVE_FB (fb, pp, victim);
          if (__glibc_likely (victim != NULL)) {
              size_t victim_idx = fastbin_index (chunksize (victim));
              if (__builtin_expect (victim_idx != idx, 0))
                malloc_printerr ("malloc(): memory corruption (fast)");
              check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
              /* While we're here, if we see other chunks of the same size,
                 stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache && tc_idx < mp_.tcache_bins) {
                  mchunkptr tc_victim;
                  /* While bin not empty and tcache not full, copy chunks.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = *fb) != NULL) {
                      if (SINGLE_THREAD_P)
                        *fb = tc_victim->fd;
                      else {
                          REMOVE_FB (fb, pp, tc_victim);
                          if (__glibc_unlikely (tc_victim == NULL))
                            break;
                        }
                      tcache_put (tc_victim, tc_idx);
                    }
                }
#endif
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }

As we can see, the operation that removes the most recently added chunk from the fastbin list has been factored out into a macro of its own (REMOVE_FB).

Let us focus on the #if USE_TCACHE block here:

The point of this code is to move as many chunks as possible from the current fastbin into the tcache.

Because all chunks in one fastbin are expected to be the same size, each fastbin corresponds to exactly one tcache bin.

Chunks keep being taken from the fastbin and stored into the tcache until the fastbin is emptied or the tcache bin is full; the sketch below demonstrates this stashing.
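
A sketch under default settings (assumptions: glibc ≥ 2.26 on 64-bit, default tcache_count of 7):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p[9];
  for (int i = 0; i < 9; i++)
    p[i] = malloc (0x18);       /* 0x20-byte chunks: fastbin/tcache sized */
  for (int i = 0; i < 9; i++)
    free (p[i]);                /* p[0..6] fill the tcache bin; p[7] and
                                   p[8] go into the fastbin */
  for (int i = 0; i < 7; i++)
    (void) malloc (0x18);       /* drain the tcache bin */
  /* This request is served from the fastbin head (p[8]); while there,
     the remaining fastbin chunk p[7] is stashed into the now-empty
     tcache bin.  */
  void *q = malloc (0x18);
  void *r = malloc (0x18);      /* served from the tcache: the stashed p[7] */
  printf ("q=%p expect p[8]=%p\nr=%p expect p[7]=%p\n", q, p[8], r, p[7]);
  return 0;
}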

Now let us focus on tcache_put (tc_victim, tc_idx), which is in fact a function:

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache;
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}

In this function, the address of the chunk's fd field (which is exactly the chunk's user-data pointer) is linked in at the head of the corresponding tcache bin. The sketch below makes the overlay visible.
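
A sketch (assumptions: glibc 2.29 through 2.31, where next is stored as a raw pointer and key points at the thread's tcache_perthread_struct; glibc 2.32 began mangling next with safe-linking, and 2.34 replaced key with a random cookie):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x48);
  void *b = malloc (0x48);
  free (a);
  free (b);
  /* After free, the user pointer itself points at a tcache_entry.
     Reading freed memory is undefined behavior; illustration only.  */
  void **e = (void **) b;
  printf ("b->next = %p (expect a = %p)\n", e[0], a);
  printf ("b->key  = %p (the tcache_perthread_struct on the heap)\n", e[1]);
  return 0;
}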

0x03 Trying to take a chunk from a small bin

  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);
      if ((victim = last (bin)) != bin)
        {
          bck = victim->bk;
          if (__glibc_unlikely (bck->fd != victim))
            malloc_printerr ("malloc(): smallbin double linked list corrupted");
          set_inuse_bit_at_offset (victim, nb);
          bin->bk = bck;
          bck->fd = bin;
          if (av != &main_arena)
            set_non_main_arena (victim);
          check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
          /* While we're here, if we see other chunks of the same size,
             stash them in the tcache.  */
          size_t tc_idx = csize2tidx (nb);
          if (tcache && tc_idx < mp_.tcache_bins)
            {
              mchunkptr tc_victim;
              /* While bin not empty and tcache not full, copy chunks over.  */
              while (tcache->counts[tc_idx] < mp_.tcache_count
                     && (tc_victim = last (bin)) != bin)
                {
                  if (tc_victim != 0)
                    {
                      bck = tc_victim->bk;
                      set_inuse_bit_at_offset (tc_victim, nb);
                      if (av != &main_arena)
                        set_non_main_arena (tc_victim);
                      bin->bk = bck;
                      bck->fd = bin;
                      tcache_put (tc_victim, tc_idx);
                    }
                }
            }
#endif
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }
    }

The rest of this code is much the same as the old version, but a tcache-related block has been added.

This tcache block again moves as many chunks as possible, this time from the small bin, into the tcache.

Notice that when a chunk is moved into the tcache here, a set_inuse operation is performed on it (set_inuse_bit_at_offset).

This suggests that chunks stored in the tcache are meant to be served as-is: to the rest of the heap they still look allocated, so neighboring frees will not coalesce with them. The sketch below makes this visible.
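
A sketch (assumptions: a fresh 64-bit heap where consecutive mallocs return adjacent chunks; peeking at heap metadata like this is illustration only):

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x48);
  void *b = malloc (0x48);              /* b's chunk header follows a's chunk */
  size_t *b_size = (size_t *) ((char *) b - sizeof (size_t));
  printf ("before free(a): b's size field = %#zx\n", *b_size);
  free (a);                             /* a goes into the tcache */
  printf ("after  free(a): b's size field = %#zx (PREV_INUSE still set)\n",
          *b_size);
  return 0;
}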

0x04 The big unsorted-bin loop

As usual, we only analyze the tcache-related parts.

1. Defining tcache-related variables
#if USE_TCACHE
  INTERNAL_SIZE_T tcache_nb = 0;
  size_t tc_idx = csize2tidx (nb);
  if (tcache && tc_idx < mp_.tcache_bins)
    tcache_nb = nb;
  int return_cached = 0;
  tcache_unsorted_count = 0;
#endif

Several variables used later are defined first: tcache_nb holds the normalized size if it falls in tcache range, return_cached records whether an exact-fit chunk has already been stashed into the tcache, and tcache_unsorted_count counts the unsorted chunks processed.

2. "Unrelated" code (part 1)
  for (;; )
    {
      int iters = 0;
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
        {
          bck = victim->bk;
          size = chunksize (victim);
          mchunkptr next = chunk_at_offset (victim, size);
          if (__glibc_unlikely (size <= 2 * SIZE_SZ)
              || __glibc_unlikely (size > av->system_mem))
            malloc_printerr ("malloc(): invalid size (unsorted)");
          if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
              || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
            malloc_printerr ("malloc(): invalid next size (unsorted)");
          if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
            malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
          if (__glibc_unlikely (bck->fd != victim)
              || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
            malloc_printerr ("malloc(): unsorted double linked list corrupted");
          if (__glibc_unlikely (prev_inuse (next)))
            malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");

Note: many more integrity checks have been added here compared with older versions.

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests. This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */
          if (in_smallbin_range (nb) &&
              bck == unsorted_chunks (av) &&
              victim == av->last_remainder &&
              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
            {
              /* split and reattach remainder */
              remainder_size = size - nb;
              remainder = chunk_at_offset (victim, nb);
              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
              av->last_remainder = remainder;
              remainder->bk = remainder->fd = unsorted_chunks (av);
              if (!in_smallbin_range (remainder_size))
                {
                  remainder->fd_nextsize = NULL;
                  remainder->bk_nextsize = NULL;
                }
              set_head (victim, nb | PREV_INUSE |
                        (av != &main_arena ? NON_MAIN_ARENA : 0));
              set_head (remainder, remainder_size | PREV_INUSE);
              set_foot (remainder, remainder_size);
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }

This part is essentially unchanged from the old version.

3. Filling the tcache with chunks
          /* remove from unsorted list */
          if (__glibc_unlikely (bck->fd != victim))
            malloc_printerr ("malloc(): corrupted unsorted chunks 3");
          unsorted_chunks (av)->bk = bck;
          bck->fd = unsorted_chunks (av);
          /* Take now instead of binning if exact fit */
          if (size == nb)
            {
              set_inuse_bit_at_offset (victim, size);
              if (av != &main_arena)
                set_non_main_arena (victim);
#if USE_TCACHE
              /* Fill cache first, return to user only if cache fills.
                 We may return one of these chunks later.  */
              /* 优先填充tcache, 只有tcache得到填充,才有可能返回其中一个chunk */
              if (tcache_nb
                  && tcache->counts[tc_idx] < mp_.tcache_count)
                {
                  tcache_put (victim, tc_idx);
                  return_cached = 1;
                  continue;
                }
              else
                {
#endif
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
#if USE_TCACHE
                }
#endif
            }

Note: this branch runs when the size of the chunk currently being scanned is exactly equal to the requested size. In older versions such a chunk was returned immediately; here it is first stashed into the matching tcache bin, and it is only returned directly when that bin is already full. When a chunk is stashed, return_cached is set and the loop continues.

4. "Unrelated" code (part 2): placing the current chunk into a small or large bin
          /* place chunk in bin */
          if (in_smallbin_range (size))
            {
              victim_index = smallbin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;
            }
          else
            {
              victim_index = largebin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;
              /* maintain large bins in sorted order */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert (chunk_main_arena (bck->bk));
                  if ((unsigned long) (size)
                      < (unsigned long) chunksize_nomask (bck->bk))
                    {
                      fwd = bck;
                      bck = bck->bk;
                      victim->fd_nextsize = fwd->fd;
                      victim->bk_nextsize = fwd->fd->bk_nextsize;
                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
                    }
                  else
                    {
                      assert (chunk_main_arena (fwd));
                      while ((unsigned long) size < chunksize_nomask (fwd))
                        {
                          fwd = fwd->fd_nextsize;
                          assert (chunk_main_arena (fwd));
                        }
                      if ((unsigned long) size
                          == (unsigned long) chunksize_nomask (fwd))
                        /* Always insert in the second position.  */
                        fwd = fwd->fd;
                      else
                        {
                          victim->fd_nextsize = fwd;
                          victim->bk_nextsize = fwd->bk_nextsize;
                          if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
                            malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
                          fwd->bk_nextsize = victim;
                          victim->bk_nextsize->fd_nextsize = victim;
                        }
                      bck = fwd->bk;
                      if (bck->fd != fwd)
                        malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
                    }
                }
              else
                victim->fd_nextsize = victim->bk_nextsize = victim;
            }
          mark_bin (av, victim_index);
          victim->bk = bck;
          victim->fd = fwd;
          fwd->bk = victim;
          bck->fd = victim;

This, too, is almost unchanged from the original.

5. Remaining details of tcache handling
#if USE_TCACHE
      /* If we've processed as many chunks as we're allowed while
         filling the cache, return one of the cached ones.  */
      ++tcache_unsorted_count;
      if (return_cached
          && mp_.tcache_unsorted_limit > 0
          && tcache_unsorted_count > mp_.tcache_unsorted_limit)
        {
          return tcache_get (tc_idx);
        }
#endif
#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }
#if USE_TCACHE
      /* If all the small chunks we found ended up cached, return one now.  */
      if (return_cached)
        {
          return tcache_get (tc_idx);
        }
#endif

0x05 The remaining code

The rest of the function contains no tcache-related code and is not much changed from older versions, so we will not go over it again.

V. New Code in __libc_free

  /* ... (earlier part of __libc_free unchanged) ... */
    munmap_chunk (p);
    return;
  }
  MAYBE_INIT_TCACHE ();
  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, 0);
}

Compared with the old version, the only addition is the single line MAYBE_INIT_TCACHE ();.
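
For reference, in glibc 2.27 MAYBE_INIT_TCACHE is defined roughly as follows; tcache_init () allocates the tcache_perthread_struct itself via _int_malloc, which is why it appears as the first chunk on a fresh heap:

# define MAYBE_INIT_TCACHE() \
  if (__glibc_unlikely (tcache == NULL)) \
    tcache_init();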

VI. New Code in _int_free

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */
  size = chunksize (p);
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");
  check_inuse_chunk(av, p);
    
#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);
        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache))
          {
            tcache_entry *tmp;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx];
                 tmp;
                 tmp = tmp->next)
              if (tmp == e)
                malloc_printerr ("free(): double free detected in tcache 2");
            /* If we get here, it was a coincidence.  We've wasted a
               few cycles, but don't abort.  */
          }
        if (tcache->counts[tc_idx] < mp_.tcache_count)
          {
            tcache_put (p, tc_idx);
            return;
          }
      }
  }
#endif

Compared with older versions, the only addition is a tcache-related block placed very early in the function.

Let us now dissect this tcache block.

First, note that it calls the chunk2mem macro and then casts the result to a tcache_entry pointer. This confirms our earlier inference: the address of every element in a tcache list is the address of the corresponding chunk's fd field.

Where LIBC_PROBE is defined, there is this comment:

/* Evaluate all the arguments and verify that N matches their number.  */
#  define LIBC_PROBE(name, n, ...) STAP_PROBE##n (__VA_ARGS__)

This looks cryptic at first, but LIBC_PROBE is just a SystemTap probe point used for tracing; it does not affect the allocation logic.

Next, when the key field matches, the code walks the entire tcache bin to check whether the chunk really is already present, and only then aborts, thereby blocking double frees.

Finally, if the tcache bin is not full, the chunk is put into it and free returns right away. The sketch below triggers the detection path.
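
A minimal sketch that triggers this detection (assuming glibc ≥ 2.29, where the key field exists):

#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x48);
  free (a);                 /* a's key field is set to the tcache struct */
  free (a);                 /* key matches, the list walk finds a, abort:
                               "free(): double free detected in tcache 2" */
  return 0;
}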
