How the ART GC Clears SoftReferences: A Source-Level Analysis

Introduction

An object held only through a SoftReference is not reclaimed by the GC unless the VM is about to run out of memory, that is, unless an OutOfMemoryError is imminent. Below we trace the exact clearing process and its preconditions through the Android 8.0 ART source; general GC principles are out of scope here.
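
Before diving into ART, the claim is easy to observe from plain Java. Below is a minimal desktop-Java sketch (the class name and heap-pressure sizes are mine, purely illustrative):

import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class SoftRefSurvival {
    public static void main(String[] args) {
        SoftReference<byte[]> soft = new SoftReference<>(new byte[1024 * 1024]);

        System.gc();  // an ordinary GC: the softly reachable referent should survive
        System.out.println("after System.gc(): " + (soft.get() != null));  // typically true

        // Drive the heap toward OutOfMemoryError. The VM spec guarantees that all
        // soft references are cleared before an OOME is thrown.
        List<byte[]> hog = new ArrayList<>();
        try {
            while (true) {
                hog.add(new byte[8 * 1024 * 1024]);
            }
        } catch (OutOfMemoryError expected) {
            hog.clear();  // release the pressure so we can keep running
        }
        System.out.println("after near-OOM: " + (soft.get() != null));  // false
    }
}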

Triggering a GC with kGcCauseForAlloc (a GC caused by running out of memory)

In an Activity, we write a loop that creates a large number of Java objects to trigger kGcCauseForAlloc.

The corresponding Java code:

import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.widget.TextView;

import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class MainActivity extends AppCompatActivity {
    List<SoftReference<List<String>>> softReferenceList = new ArrayList<>();

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        
        TextView contentText = (TextView) findViewById(R.id.sample_text);
        contentText.setText("Allocate");
        contentText.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                for (int i = 0; i < 10000; i++) {
                    softReferenceList.add(new SoftReference<List<String>>(new ArrayList<String>(10240)));
                }
            }
        }); 
    }
}

Tapping the "Allocate" button churns out a large number of Java objects. After a short while, this eventually hits the breakpoint we set inside ART.

The ART GC call stack at the breakpoint

art::mirror::Object::SetFieldObjectWithoutWriteBarrier<false, true, (art::VerifyObjectFlags)0, true> object-inl.h:771
art::mirror::Object::SetFieldObject<false, true, (art::VerifyObjectFlags)0, true> object-inl.h:781
art::mirror::Object::SetFieldObjectVolatile<false, true, (art::VerifyObjectFlags)0> object-inl.h:792
art::mirror::Reference::ClearReferent<false> reference.h:75
art::gc::ReferenceQueue::ClearWhiteReferences reference_queue.cc:144
art::gc::ReferenceProcessor::ProcessReferences reference_processor.cc:168
art::gc::collector::ConcurrentCopying::ProcessReferences concurrent_copying.cc:2648
art::gc::collector::ConcurrentCopying::MarkingPhase concurrent_copying.cc:888
art::gc::collector::ConcurrentCopying::RunPhases concurrent_copying.cc:179
art::gc::collector::GarbageCollector::Run garbage_collector.cc:96
art::gc::Heap::CollectGarbageInternal heap.cc:2607
art::gc::Heap::AllocateInternalWithGc heap.cc:1658 (a CollectGarbageInternal call in the listing below)
art::gc::Heap::AllocObjectWithAllocator<false, true, art::mirror::SetLengthVisitor> heap-inl.h:116
art::mirror::Array::Alloc<false, false> array-inl.h:183
art::AllocArrayFromCodeResolved<false> entrypoint_utils-inl.h:313
artAllocArrayFromCodeResolvedRegionTLAB quick_alloc_entrypoints.cc:128
art_quick_alloc_array_resolved32_region_tlab quick_entrypoints_x86.S:1264
...

Analyzing the call stack

Run a GC, then retry the allocation

art/runtime/gc/heap.cc

// Before this function is reached, AllocObjectWithAllocator has already tried TryToAllocate
// and failed. This function runs one or more GCs and calls TryToAllocate again after each.
mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
                                             AllocatorType allocator,
                                             bool instrumented,
                                             size_t alloc_size,
                                             size_t* bytes_allocated,
                                             size_t* usable_size,
                                             size_t* bytes_tl_bulk_allocated,
                                             ObjPtr<mirror::Class>* klass) {
  bool was_default_allocator = allocator == GetCurrentAllocator();
  // Make sure there is no pending exception since we may need to throw an OOME.
  self->AssertNoPendingException();
  DCHECK(klass != nullptr);
  StackHandleScope<1> hs(self);
  HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
  // If we were the default allocator but the allocator changed while we were suspended,
  // abort the allocation.
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  collector::GcType tried_type = next_gc_type_;
  const bool gc_ran =
      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  if (gc_ran) {  // the GC ran
    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                     usable_size, bytes_tl_bulk_allocated);
    if (ptr != nullptr) {
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (gc_type == tried_type) {
      continue;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    const bool plan_gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
        (!instrumented && EntrypointsInstrumented())) {
      return nullptr;
    }
    if (plan_gc_ran) {  // this GC ran
      // Did we free sufficient memory for the allocation to succeed?
      // Retry the allocation.
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                       usable_size, bytes_tl_bulk_allocated);
      if (ptr != nullptr) {
        return ptr;
      }
    }
  }
  // Allocations have failed after GCs;  this is an exceptional state.
  // Try harder, growing the heap if necessary.
  // Heap growth is enabled by the second template parameter of TryToAllocate<true, /*kGrow*/ true>.
  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                                  usable_size, bytes_tl_bulk_allocated);
  if (ptr != nullptr) {
    return ptr;
  }
  // Most allocations should have succeeded by this point, so the heap is really full, really
  // fragmented, or the requested size is really big. Do another GC, collecting SoftReferences
  // this time. The VM spec requires that all SoftReferences have been collected and cleared
  // before throwing an OOME.
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";
  // TODO: Run finalization, but this may cause more allocations to occur.
  // We don't need a WaitForGcToComplete here either.
  DCHECK(!gc_plan_.empty());
  // Request a GC that clears all SoftReferences; the /*clear_soft_references*/ argument is the
  // switch - when true, every SoftReference is cleared.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, /*clear_soft_references*/true);
  if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
      (!instrumented && EntrypointsInstrumented())) {
    return nullptr;
  }
  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
                                  bytes_tl_bulk_allocated);
  if (ptr == nullptr) {
    const uint64_t current_time = NanoTime();
    switch (allocator) {
      case kAllocatorTypeRosAlloc:
        // Fall-through.
      case kAllocatorTypeDlMalloc: {
        if (use_homogeneous_space_compaction_for_oom_ &&
            current_time - last_time_homogeneous_space_compaction_by_oom_ >
            min_interval_homogeneous_space_compaction_by_oom_) {
          last_time_homogeneous_space_compaction_by_oom_ = current_time;
          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
          // Thread suspension could have occurred.
          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
              (!instrumented && EntrypointsInstrumented())) {
            return nullptr;
          }
          switch (result) {
            case HomogeneousSpaceCompactResult::kSuccess:
              // If the allocation succeeded, we delayed an oom.
              ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                              usable_size, bytes_tl_bulk_allocated);
              if (ptr != nullptr) {
                count_delayed_oom_++;
              }
              break;
            case HomogeneousSpaceCompactResult::kErrorReject:
              // Reject due to disabled moving GC.
              break;
            case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
              // Throw OOM by default.
              break;
            default: {
              UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
                  << static_cast<size_t>(result);
              UNREACHABLE();
            }
          }
          // Always print that we ran homogeneous space compation since this can cause jank.
          VLOG(heap) << "Ran heap homogeneous space compaction, "
                    << " requested defragmentation "
                    << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " performed defragmentation "
                    << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " ignored homogeneous space compaction "
                    << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
                    << " delayed count = "
                    << count_delayed_oom_.LoadSequentiallyConsistent();
        }
        break;
      }
      case kAllocatorTypeNonMoving: {
        if (kUseReadBarrier) {
          // DisableMovingGc() isn't compatible with CC.
          break;
        }
        // Try to transition the heap if the allocation failure was due to the space being full.
        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
          // If we aren't out of memory then the OOM was probably from the non moving space being
          // full. Attempt to disable compaction and turn the main space into a non moving space.
          DisableMovingGc();
          // Thread suspension could have occurred.
          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
              (!instrumented && EntrypointsInstrumented())) {
            return nullptr;
          }
          // If we are still a moving GC then something must have caused the transition to fail.
          if (IsMovingGc(collector_type_)) {
            MutexLock mu(self, *gc_complete_lock_);
            // If we couldn't disable moving GC, just throw OOME and return null.
            LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
                         << disable_moving_gc_count_;
          } else {
            LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
                                            usable_size, bytes_tl_bulk_allocated);
          }
        }
        break;
      }
      default: {
        // Do nothing for others allocators.
      }
    }
  }
  // If the allocation hasn't succeeded by this point, throw an OOM error.
  if (ptr == nullptr) {
    ThrowOutOfMemoryError(self, alloc_size, allocator);
  }
  return ptr;
}

To summarize AllocateInternalWithGc: (1) wait for any in-flight GC to finish and retry the allocation; (2) run the next planned GC type and retry; (3) loop over the remaining GC types in gc_plan_, retrying after each; (4) retry with heap growth allowed (kGrow = true); (5) as a last resort, run the most thorough GC in the plan (gc_plan_.back()) with clear_soft_references = true and retry once more, falling back to allocator-specific measures such as homogeneous space compaction; (6) if the allocation still fails, throw an OutOfMemoryError. Only when every ordinary GC plus heap growth has failed does ART run the SoftReference-clearing GC.

While a GC runs, it calls ProcessReferences to process the pending Reference objects.

art/runtime/gc/reference_processor.cc

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {  // we are NOT asked to clear SoftReferences
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    // Mark the SoftReferences in soft_reference_queue_ so they will not be cleared.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer referent reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
    // could result in a stale is_marked_callback_ being called before the reference processing
    // starts since there is a small window of time where slow_path_enabled_ is enabled but the
    // callback isn't yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

ProcessReferences is called during the marking phase. The clear_soft_references parameter decides whether the SoftReferences already enqueued on soft_reference_queue_ get their referents marked: when it is false, ForwardSoftReferences marks them so they survive the collection; when it is true, that preservation step is skipped, and the subsequent ClearWhiteReferences pass clears every referent that is still unmarked ("white"). So clear_soft_references is, as its name says, the switch that controls whether SoftReferences are cleared.

art/runtime/gc/reference_queue.cc

void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      // Add it to cleared_references.
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

art/runtime/mirror/reference.h

  template<bool kTransactionActive>
  void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
    // Null out the referent field declared in java.lang.ref.Reference (and inherited by
    // SoftReference) with a volatile store.
    SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
  }
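
From the Java side, ClearReferent is exactly what makes SoftReference.get() start returning null; and if the reference was registered with a java.lang.ref.ReferenceQueue, the cleared reference also becomes observable there once reference processing finishes. A minimal sketch (hypothetical demo class, reusing the memory-pressure trick from the introduction):

import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.List;

public class ClearedRefEnqueue {
    public static void main(String[] args) throws InterruptedException {
        ReferenceQueue<byte[]> queue = new ReferenceQueue<>();
        SoftReference<byte[]> soft = new SoftReference<>(new byte[1024], queue);

        List<byte[]> hog = new ArrayList<>();  // drive the heap toward OOM
        try {
            while (true) {
                hog.add(new byte[8 * 1024 * 1024]);
            }
        } catch (OutOfMemoryError expected) {
            hog.clear();
        }

        System.out.println("cleared:  " + (soft.get() == null));  // true
        // Enqueueing happens asynchronously after clearing, so block briefly for it.
        Reference<? extends byte[]> r = queue.remove(5000);
        System.out.println("enqueued: " + (r == soft));  // should print true
    }
}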

art/runtime/gc/reference_queue.cc

void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}
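
EnqueueReference maintains a singly linked cyclic list threaded through each Reference's pendingNext field: a single element points at itself, and each new element is spliced in right after list_, preserving the cycle. A minimal Java model of the same insert logic (the Node type and field names are mine, purely illustrative):

final class PendingList {
    static final class Node { Node pendingNext; }

    private Node list;  // plays the role of ART's list_; null means the queue is empty

    void enqueue(Node ref) {
        if (list == null) {
            list = ref;
            list.pendingNext = ref;          // 1-element cyclic list: ref -> ref
        } else {
            Node head = list.pendingNext;    // current head of the cycle
            ref.pendingNext = head;          // new node points at the old head
            list.pendingNext = ref;          // list_ points at the new node; cycle preserved
        }
    }
}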

Summary

The code walkthrough above confirms the conclusion: a SoftReference's referent really is reclaimed by the GC only when an OutOfMemoryError is about to be thrown.

The ideal use case for SoftReference, then, is a cache: we would like the cached objects to stay resident in memory, but if memory gets tight we would rather let the VM reclaim them than crash with an OutOfMemoryError, and a reclaimed object can simply be reloaded the next time it is needed. A sketch of such a cache follows.
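
A minimal sketch of a SoftReference-backed cache (the class and method names are mine, not from any particular library):

import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.Map;

public final class SoftCache<K, V> {
    private final Map<K, SoftReference<V>> map = new HashMap<>();

    public synchronized V get(K key) {
        SoftReference<V> ref = map.get(key);
        V value = (ref != null) ? ref.get() : null;
        if (value == null) {
            map.remove(key);  // the referent was cleared by the GC; drop the stale entry
        }
        return value;
    }

    public synchronized void put(K key, V value) {
        map.put(key, new SoftReference<>(value));
    }
}

Callers must treat a null from get() as a cache miss and rebuild the value, since the GC is free to clear any entry under memory pressure.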
