  if (changed) {
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}
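// Size accounting below deliberately excludes to-space: it is only used as
// the copy reserve during a scavenge and holds no live objects between
// collections.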
size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}
size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}
size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}
size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}
size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}
size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}
size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}
HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}
void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}
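// Last-ditch allocation directly out of from-space. This path is only taken
// when eden allocation has failed and either we have decided to keep
// allocating from the survivor space or a GC is pending behind GC_locker;
// it additionally requires holding the Heap_lock or running in the VM
// thread at a safepoint.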
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        "  will_fail: %s"
                        "  heap_lock: %s"
                        "  free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  return allocate(size, is_tlab);
}
void DefNewGeneration::adjust_desired_tenuring_threshold(GCTracer &tracer) {
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, tracer);
}
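// A DefNew collection is a copying scavenge: live objects reachable from
// the roots and from older-generation dirty cards are evacuated into
// to-space (or promoted), followers are transitively evacuated, weak
// references are processed, and finally from- and to-space are swapped.
// On promotion failure the partially-completed scavenge is patched up via
// self-forwarding and the next full collection must clean up.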
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  _next_gen = gch->next_gen(this);
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  init_assuming_no_promotion_failure();
  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  size_t gch_prev_used = gch->used();
  gch->trace_heap_before_gc(&gc_tracer);
  SpecializationStats::clear();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  gch->rem_set()->prepare_for_younger_refs_iterate(false);
  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");
  CollectorPolicy* cp = gch->collector_policy();
  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);
  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);
  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);
  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");
  gch->gen_process_roots(_level,
                         true,  // Process younger gens, if any,
                         true,  // activate StrongRootsScope
                         GenCollectedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);
  evacuate_followers.do_void();
  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);
  if (!_promotion_failed) {
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      to()->mangle_unused_area();
    }
    swap_spaces();
    assert(to()->is_empty(), "to space should be empty now");
    adjust_desired_tenuring_threshold(gc_tracer);
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);
  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  _gc_timer->register_gc_end();
  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
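// After a failed scavenge, objects left in eden and from-space may carry
// forwarding pointers in their headers (including self-forwarded ones).
// The closure below reinstalls a fresh mark in every such object; marks
// that actually had to be preserved are restored from the side stacks in
// remove_forwarding_pointers().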
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};
void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}
void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}
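// Called when an object could neither be copied to to-space nor promoted.
// The object is forwarded to itself so that scavengers treat it as
// "already copied", its original mark is preserved if it would otherwise
// be lost, and it is pushed on a scan stack so its interior oops still
// get processed.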
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  old->forward_to(old);
  _promo_failure_scan_stack.push(old);
  if (!_promo_failure_drain_in_progress) {
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
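// Copy an object to to-space if it is young enough, otherwise try to
// promote it to the next generation. Objects surviving in to-space have
// their age incremented and recorded in the age table, which later feeds
// adjust_desired_tenuring_threshold().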
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate_aligned(s);
  }
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
    obj->incr_age();
    age_table()->add(obj, s);
  }
  old->forward_to(obj);
  return obj;
}
void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}
void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}
bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}
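// Generate one since-save-marks iterator per (closure type, non-virtual
// suffix) pair: each scans the objects allocated since the last
// save_marks() in all three spaces and then advances the watermark.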
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}
ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
#undef DefNew_SINCE_SAVE_MARKS_DEFN
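// Donate the free tail of to-space to the requesting (older) generation as
// scratch for its collection. The block header is written in place at
// to-space's top and threaded onto the caller's list; nothing is donated
// after a promotion failure, when to-space may not be empty.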
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");
  ContiguousSpace* to_space = to();
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}
void DefNewGeneration::reset_scratch() {
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
  assert(!GC_locker::is_active(), "We should not be executing here");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }
  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}
void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}
void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}
void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}
void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}
const char* DefNewGeneration::name() const {
  return "def new generation";
}
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
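// Eden allocation with a CMS-style soft end: when the fast path fails we
// ask the next generation whether the soft limit can be moved (it may want
// to sample eden chunks first), CAS the new limit in, and retry until the
// allocation succeeds or the soft end reaches the hard end.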
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    result = eden()->par_allocate(word_size);
  } while (result == NULL);
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}
void DefNewGeneration::gc_prologue(bool full) {
  eden()->set_soft_end(eden()->end());
}
size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}
size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}
size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
C:\hotspot-69087d08d473\src\share\vm/memory/defNewGeneration.hpp
#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"
class EdenSpace;
class ContiguousSpace;
class ScanClosure;
class STWGCTimer;
class DefNewGeneration: public Generation {
  friend class VMStructs;
protected:
  Generation* _next_gen;
  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  ageTable    _age_table;
  size_t      _pretenure_size_threshold_words;
  ageTable*   age_table() { return &_age_table; }
  void   init_assuming_no_promotion_failure();
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;
  void handle_promotion_failure(oop);
  void remove_forwarding_pointers();
  void   preserve_mark_if_necessary(oop obj, markOop m);
  void   preserve_mark(oop obj, markOop m);    // work routine used by the above
  Stack<oop, mtGC>     _objs_with_preserved_marks;
  Stack<markOop, mtGC> _preserved_marks_of_objs;
  ExtendedOopClosure *_promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(ExtendedOopClosure *scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }
  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;
  size_t               _max_eden_size;
  size_t               _max_survivor_size;
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }
  void adjust_desired_tenuring_threshold(GCTracer &tracer);
  EdenSpace*       _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;
  STWGCTimer* _gc_timer;
  enum SomeProtectedConstants {
    MinFreeScratchWords = 100
  };
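  // The young generation is split eden : from : to = SurvivorRatio : 1 : 1,
  // so each survivor space gets gen_size / (SurvivorRatio + 2), rounded
  // down to the given alignment (but never below one alignment unit).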
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_size_down(n, alignment) : alignment;
  }
 public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _g;
  public:
    IsAliveClosure(Generation* g);
    bool do_object_b(oop p);
  };
  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
  class EvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    ScanClosure* _scan_cur_or_nonheap;
    ScanClosure* _scan_older;
  public:
    EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             ScanClosure* cur, ScanClosure* older);
    void do_void();
  };
  class FastEvacuateFollowersClosure: public VoidClosure {
    GenCollectedHeap* _gch;
    int _level;
    DefNewGeneration* _gen;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                                 DefNewGeneration* gen,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };
 public:
  DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                   const char* policy="Copy");
  virtual void ref_processor_init();
  virtual Generation::Name kind() { return Generation::DefNew; }
  EdenSpace*       eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space;  }
  ContiguousSpace* to()   const           { return _to_space;    }
  virtual CompactibleSpace* first_compaction_space() const;
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;
  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }
  bool supports_inline_contig_alloc() const { return true; }
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;
  bool expand(size_t bytes);
  virtual bool is_maximal_no_gc() const { return true; }
  void object_iterate(ObjectClosure* blk);
  void younger_refs_iterate(OopsInGenClosure* cl);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");
    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;
    bool result = !overflows &&
                  non_zero   &&
                  size_ok;
    return result;
  }
  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);
  HeapWord* par_allocate(size_t word_size, bool is_tlab);
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
  virtual void record_spaces_top();
  virtual bool performs_in_place_marking() const { return false; }
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();
#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL)
#undef DefNew_SINCE_SAVE_MARKS_DECL
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                          size_t max_alloc_words);
  virtual void reset_scratch();
  virtual void compute_new_size();
  virtual bool collection_attempt_is_safe();
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);
  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }
  void update_counters();
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }
  bool must_be_youngest() const { return true; }
  bool must_be_oldest() const { return false; }
  void print_on(outputStream* st) const;
  void verify();
  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }
 protected:
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);
  void swap_spaces();
};
#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/defNewGeneration.inline.hpp
#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
#include "memory/cardTableRS.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/space.hpp"
template <class T>
inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    assert (obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT
  _cl->do_oop_nv(p);
  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->inline_write_ref_field_gc(p, obj);
  }
}
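// The "fast" variant additionally filters on the young-generation boundary:
// the card is dirtied only when the referent still lies below _boundary,
// i.e. only references back into the young generation need to be tracked
// by the remembered set.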
template <class T>
inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    assert (obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT
  _cl->do_oop_nv(p);
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
  if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, obj);
  }
}
#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/filemap.cpp
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"
# include <sys/stat.h>
# include <errno.h>
#ifndef O_BINARY       // if defined (Win32) use binary files.
#define O_BINARY 0     // otherwise do nothing.
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
extern address JVM_FunctionAtStart();
extern address JVM_FunctionAtEnd();
static void fail(const char *msg, va_list ap) {
  jio_fprintf(defaultStream::error_stream(),
              "An error has occurred while processing the"
              " shared archive file.\n");
  jio_vfprintf(defaultStream::error_stream(), msg, ap);
  jio_fprintf(defaultStream::error_stream(), "\n");
  vm_exit_during_initialization("Unable to use shared archive.", NULL);
}
void FileMapInfo::fail_stop(const char *msg, ...) {
  va_list ap;
  va_start(ap, msg);
  fail(msg, ap);        // Never returns.
  va_end(ap);           // for completeness.
}
void FileMapInfo::fail_continue(const char *msg, ...) {
  va_list ap;
  va_start(ap, msg);
  MetaspaceShared::set_archive_loading_failed();
  if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) {
    tty->print("[");
    tty->vprint(msg, ap);
    tty->print_cr("]");
  } else {
    if (RequireSharedSpaces) {
      fail(msg, ap);
    } else {
      if (PrintSharedSpaces) {
        tty->print_cr("UseSharedSpaces: %s", msg);
      }
    }
    UseSharedSpaces = false;
    assert(current_info() != NULL, "singleton must be registered");
    current_info()->close();
  }
  va_end(ap);
}
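// The version string stored in the archive header must fit in JVM_IDENT_MAX
// bytes. If the VM's version string is too long, keep a truncated prefix
// and append a 32-bit hash of the full string so that different long
// versions still compare unequal.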
template <int N> static void get_header_version(char (&header_version) [N]) {
  assert(N == JVM_IDENT_MAX, "Bad header_version size");
  const char *vm_version = VM_Version::internal_vm_info_string();
  const int version_len = (int)strlen(vm_version);
  if (version_len < (JVM_IDENT_MAX-1)) {
    strcpy(header_version, vm_version);
  } else {
    uint32_t hash = AltHashing::halfsiphash_32(8191, (const uint8_t*)vm_version, version_len);
    strncpy(header_version, vm_version, JVM_IDENT_MAX-9);
    sprintf(&header_version[JVM_IDENT_MAX-9], "%08x", hash);
    header_version[JVM_IDENT_MAX-1] = 0;  // Null terminate.
  }
}
FileMapInfo::FileMapInfo() {
  assert(_current_info == NULL, "must be singleton"); // not thread safe
  _current_info = this;
  memset(this, 0, sizeof(FileMapInfo));
  _file_offset = 0;
  _file_open = false;
  _header = SharedClassUtil::allocate_file_map_header();
  _header->_version = _invalid_version;
}
FileMapInfo::~FileMapInfo() {
  assert(_current_info == this, "must be singleton"); // not thread safe
  _current_info = NULL;
}
void FileMapInfo::populate_header(size_t alignment) {
  _header->populate(this, alignment);
}
size_t FileMapInfo::FileMapHeader::data_size() {
  return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
}
void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
  _magic = 0xf00baba2;
  _version = _current_version;
  _alignment = alignment;
  _obj_alignment = ObjectAlignmentInBytes;
  _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
  _classpath_entry_table = mapinfo->_classpath_entry_table;
  _classpath_entry_size = mapinfo->_classpath_entry_size;
  get_header_version(_jvm_ident);
}
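// Two passes over the boot classpath: pass 0 counts entries and sizes the
// backing Array<u8>, pass 1 fills in the SharedClassPathEntry table and
// copies the path strings into the tail of the same allocation.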
void FileMapInfo::allocate_classpath_entry_table() {
  int bytes = 0;
  int count = 0;
  char* strptr = NULL;
  char* strptr_max = NULL;
  Thread* THREAD = Thread::current();
  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
  size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
  for (int pass=0; pass<2; pass++) {
    ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
    for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
      const char *name = cpe->name();
      int name_bytes = (int)(strlen(name) + 1);
      if (pass == 0) {
        count++;
        bytes += (int)entry_size;
        bytes += name_bytes;
        if (TraceClassPaths || (TraceClassLoading && Verbose)) {
          tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
        }
      } else {
        SharedClassPathEntry* ent = shared_classpath(cur_entry);
        if (cpe->is_jar_file()) {
          struct stat st;
          if (os::stat(name, &st) != 0) {
            FileMapInfo::fail_stop("Unable to open jar file %s.", name);
          }
          EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
          SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
        } else {
          ent->_filesize  = -1;
          if (!os::dir_is_empty(name)) {
            ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
          }
        }
        ent->_name = strptr;
        if (strptr + name_bytes <= strptr_max) {
          strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
          strptr += name_bytes;
        } else {
          assert(0, "miscalculated buffer size");
        }
      }
    }
    if (pass == 0) {
      EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
      Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
      strptr = (char*)(arr->data());
      strptr_max = strptr + bytes;
      SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
      strptr += entry_size * count;
      _classpath_entry_table_size = count;
      _classpath_entry_table = table;
      _classpath_entry_size = entry_size;
    }
  }
}
bool FileMapInfo::validate_classpath_entry_table() {
  _validating_classpath_entry_table = true;
  int count = _header->_classpath_entry_table_size;
  _classpath_entry_table = _header->_classpath_entry_table;
  _classpath_entry_size = _header->_classpath_entry_size;
  for (int i=0; i<count; i++) {
    SharedClassPathEntry* ent = shared_classpath(i);
    struct stat st;
    const char* name = ent->_name;
    bool ok = true;
    if (TraceClassPaths || (TraceClassLoading && Verbose)) {
      tty->print_cr("[Checking shared classpath entry: %s]", name);
    }
    if (os::stat(name, &st) != 0) {
      fail_continue("Required classpath entry does not exist: %s", name);
      ok = false;
    } else if (ent->is_dir()) {
      if (!os::dir_is_empty(name)) {
        fail_continue("directory is not empty: %s", name);
        ok = false;
      }
    } else {
      if (ent->_timestamp != st.st_mtime ||
          ent->_filesize != st.st_size) {
        ok = false;
        if (PrintSharedArchiveAndExit) {
          fail_continue(ent->_timestamp != st.st_mtime ?
                        "Timestamp mismatch" :
                        "File size mismatch");
        } else {
          fail_continue("A jar file is not the one used while building"
                        " the shared archive file: %s", name);
        }
      }
    }
    if (ok) {
      if (TraceClassPaths || (TraceClassLoading && Verbose)) {
        tty->print_cr("[ok]");
      }
    } else if (!PrintSharedArchiveAndExit) {
      _validating_classpath_entry_table = false;
      return false;
    }
  }
  _classpath_entry_table_size = _header->_classpath_entry_table_size;
  _validating_classpath_entry_table = false;
  return true;
}
bool FileMapInfo::init_from_file(int fd) {
  size_t sz = _header->data_size();
  char* addr = _header->data();
  size_t n = os::read(fd, addr, (unsigned int)sz);
  if (n != sz) {
    fail_continue("Unable to read the file header.");
    return false;
  }
  if (_header->_version != current_version()) {
    fail_continue("The shared archive file has the wrong version.");
    return false;
  }
  size_t info_size = _header->_paths_misc_info_size;
  _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
  if (_paths_misc_info == NULL) {
    fail_continue("Unable to read the file header.");
    return false;
  }
  n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
  if (n != info_size) {
    fail_continue("Unable to read the shared path info header.");
    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
    _paths_misc_info = NULL;
    return false;
  }
  size_t len = lseek(fd, 0, SEEK_END);
  struct FileMapInfo::FileMapHeader::space_info* si =
    &_header->_space[MetaspaceShared::mc];
  if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
    fail_continue("The shared archive file has been truncated.");
    return false;
  }
  _file_offset += (long)n;
  return true;
}
bool FileMapInfo::open_for_read() {
  _full_path = Arguments::GetSharedArchivePath();
  int fd = open(_full_path, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    if (errno == ENOENT) {
      fail_continue("Specified shared archive not found.");
    } else {
      fail_continue("Failed to open shared archive file (%s).",
                    strerror(errno));
    }
    return false;
  }
  _fd = fd;
  _file_open = true;
  return true;
}
void FileMapInfo::open_for_write() {
  _full_path = Arguments::GetSharedArchivePath();
  if (PrintSharedSpaces) {
    tty->print_cr("Dumping shared data to file: ");
    tty->print_cr("   %s", _full_path);
  }
#ifdef _WINDOWS  // On Windows, need WRITE permission to remove the file.
  chmod(_full_path, _S_IREAD | _S_IWRITE);
#endif
  remove(_full_path);
  int fd = open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
  if (fd < 0) {
    fail_stop("Unable to create shared archive file %s.", _full_path);
  }
  _fd = fd;
  _file_offset = 0;
  _file_open = true;
}
void FileMapInfo::write_header() {
  int info_size = ClassLoader::get_shared_paths_misc_info_size();
  _header->_paths_misc_info_size = info_size;
  align_file_position();
  size_t sz = _header->data_size();
  char* addr = _header->data();
  write_bytes(addr, (int)sz); // skip the C++ vtable
  write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size);
  align_file_position();
}
void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
  align_file_position();
  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
  write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
void FileMapInfo::write_region(int region, char* base, size_t size,
                               size_t capacity, bool read_only,
                               bool allow_exec) {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
  if (_file_open) {
    guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
    if (PrintSharedSpaces) {
      tty->print_cr("Shared file region %d: 0x%6x bytes, addr " INTPTR_FORMAT
                    " file offset 0x%6x", region, size, base, _file_offset);
    }
  } else {
    si->_file_offset = _file_offset;
  }
  si->_base = base;
  si->_used = size;
  si->_capacity = capacity;
  si->_read_only = read_only;
  si->_allow_exec = allow_exec;
  si->_crc = ClassLoader::crc32(0, base, (jint)size);
  write_bytes_aligned(base, (int)size);
}
void FileMapInfo::write_bytes(const void* buffer, int nbytes) {
  if (_file_open) {
    int n = ::write(_fd, buffer, nbytes);
    if (n != nbytes) {
      close();
      remove(_full_path);
      fail_stop("Unable to write to shared archive file.");
    }
  }
  _file_offset += nbytes;
}
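// Round the logical file position up to the VM allocation granularity.
// When the file is open, seek to one byte before the new offset and write
// a single zero byte so the physical file is actually extended to the
// aligned length.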
void FileMapInfo::align_file_position() {
  size_t new_file_offset = align_size_up(_file_offset,
                                         os::vm_allocation_granularity());
  if (new_file_offset != _file_offset) {
    _file_offset = new_file_offset;
    if (_file_open) {
      _file_offset -= 1;
      if (lseek(_fd, (long)_file_offset, SEEK_SET) < 0) {
        fail_stop("Unable to seek.");
      }
      char zero = 0;
      write_bytes(&zero, 1);
    }
  }
}
void FileMapInfo::write_bytes_aligned(const void* buffer, int nbytes) {
  align_file_position();
  write_bytes(buffer, nbytes);
  align_file_position();
}
void FileMapInfo::close() {
  if (_file_open) {
    if (::close(_fd) < 0) {
      fail_stop("Unable to close the shared archive file.");
    }
    _file_open = false;
    _fd = -1;
  }
}
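// Remap the shared read-only region with write permission so it can be
// patched at runtime. The archive file is reopened briefly just for the
// remap, and the region is marked writable on success.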
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
  if (!si->_read_only) {
    return true;
  }
  size_t used = si->_used;
  size_t size = align_size_up(used, os::vm_allocation_granularity());
  if (!open_for_read()) {
    return false;
  }
  char *base = os::remap_memory(_fd, _full_path, si->_file_offset,
                                si->_base, size, false /* !read_only */,
                                si->_allow_exec);
  close();
  if (base == NULL) {
    fail_continue("Unable to remap shared readonly space (errno=%d).", errno);
    return false;
  }
  if (base != si->_base) {
    fail_continue("Unable to remap shared readonly space at required address.");
    return false;
  }
  si->_read_only = false;
  return true;
}
ReservedSpace FileMapInfo::reserve_shared_memory() {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
  char* requested_addr = si->_base;
  size_t size = FileMapInfo::shared_spaces_size();
  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
  if (!rs.is_reserved()) {
    fail_continue("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr);
    return rs;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
  return rs;
}
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
char* FileMapInfo::map_region(int i) {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
  size_t used = si->_used;
  size_t alignment = os::vm_allocation_granularity();
  size_t size = align_size_up(used, alignment);
  char *requested_addr = si->_base;
  char *base = os::map_memory(_fd, _full_path, si->_file_offset,
                              requested_addr, size, si->_read_only,
                              si->_allow_exec);
  if (base == NULL || base != si->_base) {
    fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
    return NULL;
  }
#ifdef _WINDOWS
  MemTracker::record_virtual_memory_type((address)base, mtClassShared);
#endif
  return base;
}
bool FileMapInfo::verify_region_checksum(int i) {
  if (!VerifySharedSpaces) {
    return true;
  }
  const char* buf = _header->_space[i]._base;
  size_t sz = _header->_space[i]._used;
  int crc = ClassLoader::crc32(0, buf, (jint)sz);
  if (crc != _header->_space[i]._crc) {
    fail_continue("Checksum verification failed.");
    return false;
  }
  return true;
}
void FileMapInfo::unmap_region(int i) {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
  size_t used = si->_used;
  size_t size = align_size_up(used, os::vm_allocation_granularity());
  if (!os::unmap_memory(si->_base, size)) {
    fail_stop("Unable to unmap shared space.");
  }
}
void FileMapInfo::assert_mark(bool check) {
  if (!check) {
    fail_stop("Mark mismatch while restoring from shared file.");
  }
}
FileMapInfo* FileMapInfo::_current_info = NULL;
SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
int FileMapInfo::_classpath_entry_table_size = 0;
size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
bool FileMapInfo::_validating_classpath_entry_table = false;
bool FileMapInfo::initialize() {
  assert(UseSharedSpaces, "UseSharedSpaces expected.");
  if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space()) {
    fail_continue("Tool agent requires sharing to be disabled.");
    return false;
  }
  if (!open_for_read()) {
    return false;
  }
  if (!init_from_file(_fd)) {
    return false;
  }
  if (!validate_header()) {
    return false;
  }
  SharedReadOnlySize =  _header->_space[0]._capacity;
  SharedReadWriteSize = _header->_space[1]._capacity;
  SharedMiscDataSize =  _header->_space[2]._capacity;
  SharedMiscCodeSize =  _header->_space[3]._capacity;
  return true;
}
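// The header checksum covers everything after the _crc field itself, so
// the stored value does not feed back into its own computation.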
int FileMapInfo::FileMapHeader::compute_crc() {
  char* header = data();
  char* buf = (char*)&_crc + sizeof(int);
  size_t sz = data_size() - (buf - header);
  int crc = ClassLoader::crc32(0, buf, (jint)sz);
  return crc;
}
int FileMapInfo::compute_header_crc() {
  return _header->compute_crc();
}
bool FileMapInfo::FileMapHeader::validate() {
  if (_magic != (int)0xf00baba2) {
    FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
    return false;
  }
  if (VerifySharedSpaces && compute_crc() != _crc) {
    fail_continue("Header checksum verification failed.");
    return false;
  }
  if (_version != current_version()) {
    FileMapInfo::fail_continue("The shared archive file is the wrong version.");
    return false;
  }
  char header_version[JVM_IDENT_MAX];
  get_header_version(header_version);
  if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
    if (TraceClassPaths) {
      tty->print_cr("Expected: %s", header_version);
      tty->print_cr("Actual:   %s", _jvm_ident);
    }
    FileMapInfo::fail_continue("The shared archive file was created by a different"
                  " version or build of HotSpot");
    return false;
  }
  if (_obj_alignment != ObjectAlignmentInBytes) {
    FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
                  " does not equal the current ObjectAlignmentInBytes of %d.",
                  _obj_alignment, ObjectAlignmentInBytes);
    return false;
  }
  return true;
}
bool FileMapInfo::validate_header() {
  bool status = _header->validate();
  if (status) {
    if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
      if (!PrintSharedArchiveAndExit) {
        fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
        status = false;
      }
    }
  }
  if (_paths_misc_info != NULL) {
    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
    _paths_misc_info = NULL;
  }
  return status;
}
bool FileMapInfo::is_in_shared_space(const void* p) {
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    if (p >= _header->_space[i]._base &&
        p < _header->_space[i]._base + _header->_space[i]._used) {
      return true;
    }
  }
  return false;
}
void FileMapInfo::print_shared_spaces() {
  gclog_or_tty->print_cr("Shared Spaces:");
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
    gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
                        shared_region_name[i],
                        si->_base, si->_base + si->_used);
  }
}
void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
  FileMapInfo *map_info = FileMapInfo::current_info();
  if (map_info) {
    map_info->fail_continue("%s", msg);
    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
      if (map_info->_header->_space[i]._base != NULL) {
        map_info->unmap_region(i);
        map_info->_header->_space[i]._base = NULL;
      }
    }
  } else if (DumpSharedSpaces) {
    fail_stop("%s", msg);
  }
}
C:\hotspot-69087d08d473\src\share\vm/memory/filemap.hpp
#ifndef SHARE_VM_MEMORY_FILEMAP_HPP
#define SHARE_VM_MEMORY_FILEMAP_HPP
#include "memory/metaspaceShared.hpp"
#include "memory/metaspace.hpp"
static const int JVM_IDENT_MAX = 256;
class Metaspace;
class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
public:
  const char *_name;
  time_t _timestamp;          // jar timestamp,  0 if is directory
  long   _filesize;           // jar file size, -1 if is directory
  bool is_dir() {
    return _filesize == -1;
  }
};
class FileMapInfo : public CHeapObj<mtInternal> {
private:
  friend class ManifestStream;
  enum {
    _invalid_version = -1,
    _current_version = 2
  };
  bool  _file_open;
  int   _fd;
  size_t  _file_offset;
private:
  static SharedClassPathEntry* _classpath_entry_table;
  static int                   _classpath_entry_table_size;
  static size_t                _classpath_entry_size;
  static bool                  _validating_classpath_entry_table;
public:
  struct FileMapHeaderBase : public CHeapObj<mtClass> {
    virtual bool validate() = 0;
    virtual void populate(FileMapInfo* info, size_t alignment) = 0;
  };
  struct FileMapHeader : FileMapHeaderBase {
    static size_t data_size();
    char* data() {
      return ((char*)this) + sizeof(FileMapHeaderBase);
    }
    int    _magic;                    // identify file type.
    int    _crc;                      // header crc checksum.
    int    _version;                  // (from enum, above.)
    size_t _alignment;                // how shared archive should be aligned
    int    _obj_alignment;            // value of ObjectAlignmentInBytes
    struct space_info {
      int    _crc;           // crc checksum of the current space
      size_t _file_offset;   // sizeof(this) rounded to vm page size
      char*  _base;          // copy-on-write base address
      size_t _capacity;      // for validity checking
      size_t _used;          // for setting space top on read
      bool   _read_only;     // read only space?
      bool   _allow_exec;    // executable code in space?
    } _space[MetaspaceShared::n_regions];
    char  _jvm_ident[JVM_IDENT_MAX];      // identifier for jvm
    int _paths_misc_info_size;
    int _classpath_entry_table_size;
    size_t _classpath_entry_size;
    SharedClassPathEntry* _classpath_entry_table;
    virtual bool validate();
    virtual void populate(FileMapInfo* info, size_t alignment);
    int compute_crc();
  };
  FileMapHeader * _header;
  const char* _full_path;
  char* _paths_misc_info;
  static FileMapInfo* _current_info;
  bool  init_from_file(int fd);
  void  align_file_position();
  bool  validate_header_impl();
public:
  FileMapInfo();
  ~FileMapInfo();
  static int current_version()        { return _current_version; }
  int    compute_header_crc();
  void   set_header_crc(int crc)      { _header->_crc = crc; }
  void   populate_header(size_t alignment);
  bool   validate_header();
  void   invalidate();
  int    version()                    { return _header->_version; }
  size_t alignment()                  { return _header->_alignment; }
  size_t space_capacity(int i)        { return _header->_space[i]._capacity; }
  char*  region_base(int i)           { return _header->_space[i]._base; }
  struct FileMapHeader* header()      { return _header; }
  static FileMapInfo* current_info() {
    CDS_ONLY(return _current_info;)
    NOT_CDS(return NULL;)
  }
  static void assert_mark(bool check);
  bool  initialize() NOT_CDS_RETURN_(false);
  bool  open_for_read();
  void  open_for_write();
  void  write_header();
  void  write_space(int i, Metaspace* space, bool read_only);
  void  write_region(int region, char* base, size_t size,
                     size_t capacity, bool read_only, bool allow_exec);
  void  write_bytes(const void* buffer, int count);
  void  write_bytes_aligned(const void* buffer, int count);
  char* map_region(int i);
  void  unmap_region(int i);
  bool  verify_region_checksum(int i);
  void  close();
  bool  is_open() { return _file_open; }
  ReservedSpace reserve_shared_memory();
  bool  remap_shared_readonly_as_readwrite();
  static void fail_stop(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
  static void fail_continue(const char *msg, ...) ATTRIBUTE_PRINTF(1, 2);
  bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
  void print_shared_spaces() NOT_CDS_RETURN;
  static size_t shared_spaces_size() {
    return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
                         SharedMiscDataSize + SharedMiscCodeSize,
                         os::vm_allocation_granularity());
  }
  static void stop_sharing_and_unmap(const char* msg);
  static void allocate_classpath_entry_table();
  bool validate_classpath_entry_table();
  static SharedClassPathEntry* shared_classpath(int index) {
    char* p = (char*)_classpath_entry_table;
    p += _classpath_entry_size * index;
    return (SharedClassPathEntry*)p;
  }
  static const char* shared_classpath_name(int index) {
    return shared_classpath(index)->_name;
  }
  static int get_number_of_share_classpaths() {
    return _classpath_entry_table_size;
  }
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/freeBlockDictionary.cpp
#include "precompiled.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // INCLUDE_ALL_GCS
#include "memory/freeBlockDictionary.hpp"
#include "memory/metachunk.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/macros.hpp"
#ifndef PRODUCT
template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
  return _lock;
}
template <class Chunk> void FreeBlockDictionary<Chunk>::set_par_lock(Mutex* lock) {
  _lock = lock;
}
template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() const {
#ifdef ASSERT
  if (ParallelGCThreads > 0) {
    Thread* my_thread = Thread::current();
    if (my_thread->is_GC_task_thread()) {
      assert(par_lock() != NULL, "Should be using locking?");
      assert_lock_strong(par_lock());
    }
  }
#endif // ASSERT
}
#endif
template class FreeBlockDictionary<Metablock>;
template class FreeBlockDictionary<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeBlockDictionary<FreeChunk>;
#endif // INCLUDE_ALL_GCS
C:\hotspot-69087d08d473\src\share\vm/memory/freeBlockDictionary.hpp
#ifndef SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
#define SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
template <class Chunk>
class FreeBlockDictionary: public CHeapObj<mtGC> {
 public:
  enum Dither {
    atLeast,
    exactly,
    roughly
  };
  enum DictionaryChoice {
    dictionaryBinaryTree = 0,
    dictionarySplayTree  = 1,
    dictionarySkipList   = 2
  };
 private:
  NOT_PRODUCT(Mutex* _lock;)
 public:
  virtual void       remove_chunk(Chunk* fc) = 0;
  virtual Chunk*     get_chunk(size_t size, Dither dither = atLeast) = 0;
  virtual void       return_chunk(Chunk* chunk) = 0;
  virtual size_t     total_chunk_size(debug_only(const Mutex* lock)) const = 0;
  virtual size_t     max_chunk_size()   const = 0;
  virtual size_t     min_size()        const = 0;
  virtual void       reset(HeapWord* addr, size_t size) = 0;
  virtual void       reset() = 0;
  virtual void       dict_census_update(size_t size, bool split, bool birth) = 0;
  virtual bool       coal_dict_over_populated(size_t size) = 0;
  virtual void       begin_sweep_dict_census(double coalSurplusPercent,
                       float inter_sweep_current, float inter_sweep_estimate,
                       float intra__sweep_current) = 0;
  virtual void       end_sweep_dict_census(double splitSurplusPercent) = 0;
  virtual Chunk*     find_largest_dict() const = 0;
  virtual bool verify_chunk_in_free_list(Chunk* tc) const = 0;
  virtual double sum_of_squared_block_sizes() const = 0;
  virtual Chunk* find_chunk_ends_at(HeapWord* target) const = 0;
  virtual void inc_total_size(size_t v) = 0;
  virtual void dec_total_size(size_t v) = 0;
  NOT_PRODUCT (
    virtual size_t   sum_dict_returned_bytes() = 0;
    virtual void     initialize_dict_returned_bytes() = 0;
    virtual size_t   total_count() = 0;
  )
  virtual void       report_statistics() const {
    gclog_or_tty->print("No statistics available");
  }
  virtual void       print_dict_census() const = 0;
  virtual void       print_free_lists(outputStream* st) const = 0;
  virtual void       verify()         const = 0;
  Mutex* par_lock()                const PRODUCT_RETURN0;
  void   set_par_lock(Mutex* lock)       PRODUCT_RETURN;
  void   verify_par_locked()       const PRODUCT_RETURN;
};
#endif // SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/freeList.cpp
#include "precompiled.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // INCLUDE_ALL_GCS
template <class Chunk>
FreeList<Chunk>::FreeList() :
  _head(NULL), _tail(NULL)
#ifdef ASSERT
  , _protecting_lock(NULL)
#endif
{
  _size         = 0;
  _count        = 0;
}
template <class Chunk>
void FreeList<Chunk>::link_head(Chunk* v) {
  assert_proper_lock_protection();
  set_head(v);
  if (v != NULL) {
    v->link_prev(NULL);
  }
}
template <class Chunk>
void FreeList<Chunk>::reset() {
  set_count(0);
  set_head(NULL);
  set_tail(NULL);
}
template <class Chunk>
void FreeList<Chunk>::initialize() {
#ifdef ASSERT
  set_protecting_lock(NULL);
#endif
  reset();
  set_size(0);
}
template <class Chunk_t>
Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
  assert_proper_lock_protection();
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  Chunk_t* fc = head();
  if (fc != NULL) {
    Chunk_t* nextFC = fc->next();
    if (nextFC != NULL) {
      nextFC->link_prev(NULL);
    } else { // removed tail of list
      link_tail(NULL);
    }
    link_head(nextFC);
    decrement_count();
  }
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  return fc;
}
template <class Chunk>
void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
  assert_proper_lock_protection();
  assert(fl->count() == 0, "Precondition");
  if (count() > 0) {
    int k = 1;
    fl->set_head(head()); n--;
    Chunk* tl = head();
    while (tl->next() != NULL && n > 0) {
      tl = tl->next(); n--; k++;
    }
    assert(tl != NULL, "Loop Inv.");
    Chunk* new_head = tl->next();
    set_head(new_head);
    set_count(count() - k);
    if (new_head == NULL) {
      set_tail(NULL);
    } else {
      new_head->link_prev(NULL);
    }
    tl->link_next(NULL);
    fl->set_tail(tl);
    fl->set_count(k);
  }
}
template <class Chunk>
void FreeList<Chunk>::remove_chunk(Chunk* fc) {
  assert_proper_lock_protection();
  assert(head() != NULL, "Remove from empty list");
  assert(fc != NULL, "Remove a NULL chunk");
  assert(size() == fc->size(), "Wrong list");
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  Chunk* prevFC = fc->prev();
  Chunk* nextFC = fc->next();
  if (nextFC != NULL) {
    nextFC->link_prev(prevFC);
  } else { // removed tail of list
    link_tail(prevFC);
  }
  if (prevFC == NULL) { // removed head of list
    link_head(nextFC);
    assert(nextFC == NULL || nextFC->prev() == NULL,
      "Prev of head should be NULL");
  } else {
    prevFC->link_next(nextFC);
    assert(tail() != prevFC || prevFC->next() == NULL,
      "Next of tail should be NULL");
  }
  decrement_count();
  assert(((head() == NULL) + (tail() == NULL) + (count() == 0)) % 3 == 0,
         "H/T/C Inconsistency");
  NOT_PRODUCT(
    fc->link_prev(NULL);
    fc->link_next(NULL);
  )
  assert(fc->is_free(), "Should still be a free chunk");
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  assert(head() == NULL || head()->size() == size(), "wrong item on list");
  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
}
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
  assert_proper_lock_protection();
  assert(chunk != NULL, "insert a NULL chunk");
  assert(size() == chunk->size(), "Wrong size");
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  Chunk* oldHead = head();
  assert(chunk != oldHead, "double insertion");
  chunk->link_after(oldHead);
  link_head(chunk);
  if (oldHead == NULL) { // only chunk in list
    assert(tail() == NULL, "inconsistent FreeList");
    link_tail(chunk);
  }
  increment_count(); // of # of chunks in list
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  assert(head() == NULL || head()->size() == size(), "wrong item on list");
  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
}
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
  assert_proper_lock_protection();
  return_chunk_at_head(chunk, true);
}
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
  assert_proper_lock_protection();
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  assert(chunk != NULL, "insert a NULL chunk");
  assert(size() == chunk->size(), "wrong size");
  Chunk* oldTail = tail();
  assert(chunk != oldTail, "double insertion");
  if (oldTail != NULL) {
    oldTail->link_after(chunk);
  } else { // only chunk in list
    assert(head() == NULL, "inconsistent FreeList");
    link_head(chunk);
  }
  link_tail(chunk);
  increment_count();  // of # of chunks in list
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  assert(head() == NULL || head()->size() == size(), "wrong item on list");
  assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
}
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
  return_chunk_at_tail(chunk, true);
}
template <class Chunk>
void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
  assert_proper_lock_protection();
  if (fl->count() > 0) {
    if (count() == 0) {
      set_head(fl->head());
      set_tail(fl->tail());
      set_count(fl->count());
    } else {
      Chunk* fl_tail = fl->tail();
      Chunk* this_head = head();
      assert(fl_tail->next() == NULL, "Well-formedness of fl");
      fl_tail->link_next(this_head);
      this_head->link_prev(fl_tail);
      set_head(fl->head());
      set_count(count() + fl->count());
    }
    fl->set_head(NULL);
    fl->set_tail(NULL);
    fl->set_count(0);
  }
}
template <class Chunk>
bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
  guarantee(fc->size() == size(), "Wrong list is being searched");
  Chunk* curFC = head();
  while (curFC) {
    guarantee(size() == curFC->size(), "Chunk is in wrong list.");
    if (fc == curFC) {
      return true;
    }
    curFC = curFC->next();
  }
  return false;
}
#ifndef PRODUCT
template <class Chunk>
void FreeList<Chunk>::assert_proper_lock_protection_work() const {
  assert(protecting_lock() != NULL, "Don't call this directly");
  assert(ParallelGCThreads > 0, "Don't call this directly");
  Thread* thr = Thread::current();
  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
    // no lock check for the VM thread or concurrent GC threads
  } else if (thr->is_GC_task_thread()) {
    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
  } else if (thr->is_Java_thread()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
  } else {
    ShouldNotReachHere();  // unaccounted thread type?
  }
}
#endif
template <class Chunk>
void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
  st->print("%16s\t", c);
  st->print("%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"
            "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "%14s\t"    "\n",
            "bfrsurp", "surplus", "desired", "prvSwep", "bfrSwep",
            "count",   "cBirths", "cDeaths", "sBirths", "sDeaths");
}
template <class Chunk_t>
void FreeList<Chunk_t>::print_on(outputStream* st, const char* c) const {
  if (c != NULL) {
    st->print("%16s", c);
  } else {
    st->print(SIZE_FORMAT_W(16), size());
  }
}
template class FreeList<Metablock>;
template class FreeList<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
#endif // INCLUDE_ALL_GCS
C:\hotspot-69087d08d473\src\share\vm/memory/freeList.hpp
#ifndef SHARE_VM_MEMORY_FREELIST_HPP
#define SHARE_VM_MEMORY_FREELIST_HPP
#include "gc_implementation/shared/allocationStats.hpp"
class CompactibleFreeListSpace;
class Mutex;
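// A FreeList is a doubly-linked list of free chunks that all share one size
// (_size, in heap words); _count tracks the number of chunks on the list.
// In debug builds, _protecting_lock lets assert_proper_lock_protection()
// verify that GC worker threads hold the owning lock before mutating the list.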
template <class Chunk_t>
class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
 private:
  Chunk_t*      _head;          // Head of list of free chunks
  Chunk_t*      _tail;          // Tail of list of free chunks
  size_t        _size;          // Size in Heap words of each chunk
  ssize_t       _count;         // Number of entries in list
 protected:
#ifdef ASSERT
  Mutex*        _protecting_lock;
#endif
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }
  void increment_count()    {
    _count++;
  }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }
 public:
  FreeList();
  void initialize();
  void reset();
#ifdef ASSERT
  Mutex* protecting_lock() const { return _protecting_lock; }
  void set_protecting_lock(Mutex* v) {
    _protecting_lock = v;
  }
#endif
  Chunk_t* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(Chunk_t* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  void link_head(Chunk_t* v);
  Chunk_t* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(Chunk_t* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  void link_tail(Chunk_t* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clear_next();
    }
  }
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const { return _count; }
  void set_count(ssize_t v) { _count = v; }
  size_t get_better_size() { return size(); }
  size_t returned_bytes() const { ShouldNotReachHere(); return 0; }
  void set_returned_bytes(size_t v) {}
  void increment_returned_bytes_by(size_t v) {}
  Chunk_t* get_chunk_at_head();
  void getFirstNChunksFromList(size_t n, FreeList<Chunk_t>* fl);
  void remove_chunk(Chunk_t* fc);
  void return_chunk_at_head(Chunk_t* fc);
  void return_chunk_at_tail(Chunk_t* fc);
  void return_chunk_at_head(Chunk_t* fc, bool record_return);
  void return_chunk_at_tail(Chunk_t* fc, bool record_return);
  void prepend(FreeList<Chunk_t>* fl);
  bool verify_chunk_in_free_list(Chunk_t* fc) const;
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};
#endif // SHARE_VM_MEMORY_FREELIST_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/gcLocker.cpp
#include "precompiled.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/thread.inline.hpp"
volatile jint GC_locker::_jni_lock_count = 0;
volatile bool GC_locker::_needs_gc       = false;
volatile bool GC_locker::_doing_gc       = false;
unsigned int  GC_locker::_total_collections = 0;
#ifdef ASSERT
volatile jint GC_locker::_debug_jni_lock_count = 0;
#endif
#ifdef ASSERT
void GC_locker::verify_critical_count() {
  if (SafepointSynchronize::is_at_safepoint()) {
    assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
    int count = 0;
    for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
      if (thr->in_critical()) {
        count++;
      }
    }
    if (_jni_lock_count != count) {
      tty->print_cr("critical counts don't match: %d != %d", _jni_lock_count, count);
      for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
        if (thr->in_critical()) {
          tty->print_cr(INTPTR_FORMAT " in_critical %d", p2i(thr), thr->in_critical());
        }
      }
    }
    assert(_jni_lock_count == count, "must be equal");
  }
}
#endif
bool GC_locker::check_active_before_gc() {
  assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
  if (is_active() && !_needs_gc) {
    verify_critical_count();
    _needs_gc = true;
    if (PrintJNIGCStalls && PrintGCDetails) {
      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
      gclog_or_tty->print_cr("%.3f: Setting _needs_gc. Thread \"%s\" %d locked.",
                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
    }
  }
  return is_active();
}
void GC_locker::stall_until_clear() {
  assert(!JavaThread::current()->in_critical(), "Would deadlock");
  MutexLocker   ml(JNICritical_lock);
  if (needs_gc()) {
    if (PrintJNIGCStalls && PrintGCDetails) {
      ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
      gclog_or_tty->print_cr("%.3f: Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
    }
  }
  while (needs_gc()) {
    JNICritical_lock->wait();
  }
}
bool GC_locker::should_discard(GCCause::Cause cause, uint total_collections) {
  return (cause == GCCause::_gc_locker) &&
         (_total_collections != total_collections);
}
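// Slow path taken by lock_critical() once a GC has been requested: block the
// entering thread until the pending _gc_locker collection (and any collection
// currently in progress) completes, then enter the critical region.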
void GC_locker::jni_lock(JavaThread* thread) {
  assert(!thread->in_critical(), "shouldn't currently be in a critical region");
  MutexLocker mu(JNICritical_lock);
  while (is_active_and_needs_gc() || _doing_gc) {
    JNICritical_lock->wait();
  }
  thread->enter_critical();
  _jni_lock_count++;
  increment_debug_jni_lock_count();
}
void GC_locker::jni_unlock(JavaThread* thread) {
  assert(thread->in_last_critical(), "should be exiting critical region");
  MutexLocker mu(JNICritical_lock);
  _jni_lock_count--;
  decrement_debug_jni_lock_count();
  thread->exit_critical();
  if (needs_gc() && !is_active_internal()) {
    _total_collections = Universe::heap()->total_collections();
    _doing_gc = true;
    {
      MutexUnlocker munlock(JNICritical_lock);
      if (PrintJNIGCStalls && PrintGCDetails) {
        ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
        gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
            gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
      }
      Universe::heap()->collect(GCCause::_gc_locker);
    }
    _doing_gc = false;
    _needs_gc = false;
    JNICritical_lock->notify_all();
  }
}
#ifdef ASSERT
No_GC_Verifier::No_GC_Verifier(bool verifygc) {
  _verifygc = verifygc;
  if (_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    _old_invocations = h->total_collections();
  }
}
No_GC_Verifier::~No_GC_Verifier() {
  if (_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    if (_old_invocations != h->total_collections()) {
      fatal("collection in a No_GC_Verifier secured function");
    }
  }
}
Pause_No_GC_Verifier::Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {
  _ngcv = ngcv;
  if (_ngcv->_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    if (_ngcv->_old_invocations != h->total_collections()) {
      fatal("collection in a No_GC_Verifier secured function");
    }
  }
}
Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
  if (_ngcv->_verifygc) {
    CollectedHeap* h = Universe::heap();
    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
    _ngcv->_old_invocations = h->total_collections();
  }
}
JRT_Leaf_Verifier::JRT_Leaf_Verifier()
  : No_Safepoint_Verifier(true, JRT_Leaf_Verifier::should_verify_GC())
{
}
JRT_Leaf_Verifier::~JRT_Leaf_Verifier()
{
}
bool JRT_Leaf_Verifier::should_verify_GC() {
  switch (JavaThread::current()->thread_state()) {
  case _thread_in_Java:
    return true;
  case _thread_in_native:
    return false;
  default:
    ShouldNotReachHere();
    return false;
  }
}
#endif
C:\hotspot-69087d08d473\src\share\vm/memory/gcLocker.hpp
#ifndef SHARE_VM_MEMORY_GCLOCKER_HPP
#define SHARE_VM_MEMORY_GCLOCKER_HPP
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
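// GC_locker coordinates JNI critical regions with garbage collection: while
// any thread is inside a JNI critical region, object-moving GC must not run.
// If a GC becomes necessary in the meantime, _needs_gc is set and the
// collection is performed by the last thread to leave its critical region
// (GC_locker::jni_unlock).
//
// A minimal usage sketch (hypothetical caller, not part of this file):
//
//   JavaThread* thread = JavaThread::current();
//   GC_locker::lock_critical(thread);    // enter a JNI critical region
//   // ... access raw array/string memory that must not move ...
//   GC_locker::unlock_critical(thread);  // exit; may run a deferred GC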
class GC_locker: public AllStatic {
 private:
  static volatile jint _jni_lock_count;  // number of jni active instances.
  static volatile bool _needs_gc;        // heap is filling, we need a GC
  static volatile bool _doing_gc;        // unlock_critical() is doing a GC
  static uint _total_collections;        // value for _gc_locker collection
#ifdef ASSERT
  static volatile jint _debug_jni_lock_count;
#endif
  static void verify_critical_count() NOT_DEBUG_RETURN;
  static void jni_lock(JavaThread* thread);
  static void jni_unlock(JavaThread* thread);
  static bool is_active_internal() {
    verify_critical_count();
    return _jni_lock_count > 0;
  }
 public:
  static bool is_active() {
    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    return is_active_internal();
  }
  static bool needs_gc()       { return _needs_gc;                        }
  static bool is_active_and_needs_gc() {
    return needs_gc() && is_active_internal();
  }
  static void increment_debug_jni_lock_count() {
#ifdef ASSERT
    assert(_debug_jni_lock_count >= 0, "bad value");
    Atomic::inc(&_debug_jni_lock_count);
#endif
  }
  static void decrement_debug_jni_lock_count() {
#ifdef ASSERT
    assert(_debug_jni_lock_count > 0, "bad value");
    Atomic::dec(&_debug_jni_lock_count);
#endif
  }
  static void set_jni_lock_count(int count) {
    _jni_lock_count = count;
    verify_critical_count();
  }
  static bool check_active_before_gc();
  static bool should_discard(GCCause::Cause cause, uint total_collections);
  static void stall_until_clear();
  static void lock_critical(JavaThread* thread);
  static void unlock_critical(JavaThread* thread);
  static address needs_gc_address() { return (address) &_needs_gc; }
};
class No_GC_Verifier: public StackObj {
 friend class Pause_No_GC_Verifier;
 protected:
  bool _verifygc;
  unsigned int _old_invocations;
 public:
#ifdef ASSERT
  No_GC_Verifier(bool verifygc = true);
  ~No_GC_Verifier();
#else
  No_GC_Verifier(bool verifygc = true) {}
  ~No_GC_Verifier() {}
#endif
};
class Pause_No_GC_Verifier: public StackObj {
 private:
  No_GC_Verifier * _ngcv;
 public:
#ifdef ASSERT
  Pause_No_GC_Verifier(No_GC_Verifier * ngcv);
  ~Pause_No_GC_Verifier();
#else
  Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {}
  ~Pause_No_GC_Verifier() {}
#endif
};
class No_Safepoint_Verifier : public No_GC_Verifier {
 friend class Pause_No_Safepoint_Verifier;
 private:
  bool _activated;
  Thread *_thread;
 public:
#ifdef ASSERT
  No_Safepoint_Verifier(bool activated = true, bool verifygc = true) :
    No_GC_Verifier(verifygc),
    _activated(activated) {
    _thread = Thread::current();
    if (_activated) {
      _thread->_allow_allocation_count++;
      _thread->_allow_safepoint_count++;
    }
  }
  ~No_Safepoint_Verifier() {
    if (_activated) {
      _thread->_allow_allocation_count--;
      _thread->_allow_safepoint_count--;
    }
  }
#else
  No_Safepoint_Verifier(bool activated = true, bool verifygc = true) : No_GC_Verifier(verifygc){}
  ~No_Safepoint_Verifier() {}
#endif
};
class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier {
 private:
  No_Safepoint_Verifier * _nsv;
 public:
#ifdef ASSERT
  Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv)
    : Pause_No_GC_Verifier(nsv) {
    _nsv = nsv;
    if (_nsv->_activated) {
      _nsv->_thread->_allow_allocation_count--;
      _nsv->_thread->_allow_safepoint_count--;
    }
  }
  ~Pause_No_Safepoint_Verifier() {
    if (_nsv->_activated) {
      _nsv->_thread->_allow_allocation_count++;
      _nsv->_thread->_allow_safepoint_count++;
    }
  }
#else
  Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv)
    : Pause_No_GC_Verifier(nsv) {}
  ~Pause_No_Safepoint_Verifier() {}
#endif
};
class SkipGCALot : public StackObj {
  private:
   bool _saved;
   Thread* _t;
  public:
#ifdef ASSERT
    SkipGCALot(Thread* t) : _t(t) {
      _saved = _t->skip_gcalot();
      _t->set_skip_gcalot(true);
    }
    ~SkipGCALot() {
      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
      _t->set_skip_gcalot(_saved);
    }
#else
    SkipGCALot(Thread* t) { }
    ~SkipGCALot() { }
#endif
};
class JRT_Leaf_Verifier : public No_Safepoint_Verifier {
  static bool should_verify_GC();
 public:
#ifdef ASSERT
  JRT_Leaf_Verifier();
  ~JRT_Leaf_Verifier();
#else
  JRT_Leaf_Verifier() {}
  ~JRT_Leaf_Verifier() {}
#endif
};
class No_Alloc_Verifier : public StackObj {
 private:
  bool  _activated;
 public:
#ifdef ASSERT
  No_Alloc_Verifier(bool activated = true) {
    _activated = activated;
    if (_activated) Thread::current()->_allow_allocation_count++;
  }
  ~No_Alloc_Verifier() {
    if (_activated) Thread::current()->_allow_allocation_count--;
  }
#else
  No_Alloc_Verifier(bool activated = true) {}
  ~No_Alloc_Verifier() {}
#endif
};
#endif // SHARE_VM_MEMORY_GCLOCKER_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/gcLocker.inline.hpp
#ifndef SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
#define SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
#include "memory/gcLocker.hpp"
inline void GC_locker::lock_critical(JavaThread* thread) {
  if (!thread->in_critical()) {
    if (needs_gc()) {
      jni_lock(thread);
      return;
    }
    increment_debug_jni_lock_count();
  }
  thread->enter_critical();
}
inline void GC_locker::unlock_critical(JavaThread* thread) {
  if (thread->in_last_critical()) {
    if (needs_gc()) {
      jni_unlock(thread);
      return;
    }
    decrement_debug_jni_lock_count();
  }
  thread->exit_critical();
}
#endif // SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/genCollectedHeap.cpp
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR
GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
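// The potentially parallel strong-root scanning tasks. Each element is
// claimed at most once per root scan through the SubTasksDone mechanism
// (_process_strong_tasks), so with multiple GC workers every root group is
// still processed exactly once.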
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  GCH_PS_NumElements
};
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();
  int i;
  _n_gens = gen_policy()->number_of_generations();
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  size_t gen_alignment = Generation::GenGrain;
  _gen_specs = gen_policy()->generations();
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }
  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;
  size_t heap_alignment = collector_policy()->heap_alignment();
  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }
  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;
  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();
#if INCLUDE_ALL_GCS
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS
  return JNI_OK;
}
char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");
  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));
  n_covered_regions += 2;
  // Report the computed totals back to the caller and reserve the heap.
  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}
void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");
  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");
  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}
void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}
size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}
size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}
size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
#ifndef PRODUCT
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  return NULL;
}
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}
bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
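// The main driver for stop-the-world collections: pick a starting level
// (a full collection starts at the oldest generation that also collects the
// younger ones), then, for each generation from starting_level up to
// max_level that should_collect(), run verification, reference processing
// and statistics updates around the generation's collect() call, and finally
// resize the collected generations.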
void GenCollectedHeap::do_collection(bool  full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");
  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();
  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  const size_t metadata_prev_used = MetaspaceAux::used_bytes();
  print_heap_before_gc();
  {
    FlagSetting fl(_is_gc_active, true);
    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
    gc_prologue(complete);
    increment_total_collections(complete);
    size_t gch_prev_used = used();
    int starting_level = 0;
    if (full) {
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }
    bool must_restore_marks_for_biased_locking = false;
    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();
        record_gen_tops_before_GC();
        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }
        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());
        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }
        {
          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }
        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
        _gens[i]->stat_record()->accumulated_time.stop();
        update_gc_stats(i, full);
        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }
        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }
    complete = complete || (max_level_collected == n_gens() - 1);
    if (complete) { // We did a "major" collection
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }
    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
    for (int j = max_level_collected; j >= 0; j -= 1) {
      _gens[j]->compute_new_size();
    }
    if (complete) {
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }
    MemoryService::track_memory_usage();
    gc_epilogue(complete);
    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }
  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());
  print_heap_after_gc();
#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}
void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  set_n_termination(t);
}
void GenCollectedHeap::set_n_termination(uint t) {
  _process_strong_tasks->set_n_threads(t);
}
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
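// Scan the strong (and optionally weak) root groups. Each group is guarded
// by is_task_claimed(), so when several GC worker threads run this in
// parallel each root group is scanned by exactly one worker.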
void GenCollectedHeap::process_roots(bool activate_scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");
      CodeCache::blobs_do(code_roots);
    }
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}
void GenCollectedHeap::gen_process_roots(int level,
                                         bool younger_gens_as_roots,
                                         bool activate_scope,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* not_older_gens,
                                         OopsInGenClosure* older_gens,
                                         CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    is_moving_collection = true;
  }
  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
  process_roots(activate_scope, so,
                not_older_gens, weak_roots,
                cld_closure, weak_cld_closure,
                &mark_code_closure);
  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }
  _process_strong_tasks->all_tasks_completed();
}
void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(root_closure);
  JFR_ONLY(Jfr::weak_oops_do(root_closure));
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}
#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}
ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}
HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}
HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}
void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if ((cause == GCCause::_wb_young_gc) ||
             (cause == GCCause::_gc_locker)) {
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      collect(cause, 0);
    } else {
      collect(cause, n_gens() - 1);
    }
#else
    collect(cause, n_gens() - 1);
#endif
  }
}
void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  if (GC_locker::should_discard(cause, gc_count_before)) {
    return;
  }
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {
  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());
  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}
void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
  MutexLocker ml(Heap_lock);
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                max_level            /* max_level */);
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
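// The young generation is reserved below the old generation(s), so a single
// address comparison against the oldest generation's reserved start suffices;
// the assert cross-checks against _gens[0]->is_in_reserved(p).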
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
  return result;
}
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||   // already printing
            VerifyAfterGC       ||
            VMError::fatal_error_in_progress(), "too expensive");
  #endif
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  return false;
}
#ifdef ASSERT
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif
void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}
void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}
void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}
Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  assert(false, "Could not find containing space");
  return NULL;
}
HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}
size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}
bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}
bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}
size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}
size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}
size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}
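// Unlink and return the smallest block on the list headed by *prev_ptr
// (one selection step; the list must be non-empty).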
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;  // unlink the smallest block from the list
  return smallest;
}
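// Selection sort: repeatedly unlink the smallest remaining block and prepend
// it to the sorted list, which therefore ends up in decreasing size order.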
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}
void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}
class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};
void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}
bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}
void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}
void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  CompactPoint cp(old_gen);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}
GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}
void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}
void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}
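// Emits the standard "used-before->used-after(capacity)" fragment of a GC log
// line; with illustrative numbers, the non-verbose form prints something like
// " 5120K->1024K(16384K)".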
void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};
void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  always_do_update_barrier = false;
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}
class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};
void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
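  // Compiled code performs inline allocation as an unchecked "top + size" add;
  // the guarantee below verifies that the gap between the current allocation
  // end and (nearly) the top of the address space exceeds the largest
  // fast-path allocation size, so that add can never wrap.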
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */
  resize_all_tlabs();
  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
  always_do_update_barrier = UseConcMarkSweepGC;
}
#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};
void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT
class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};
void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}
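// Last-ditch promotion path: try to expand the old generation and copy the
// object there. Returns NULL when even expansion cannot make room, in which
// case the caller must handle the promotion failure (e.g. by forwarding the
// object to itself and scheduling a full collection).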
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;
  result = old_gen->expand_and_allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}
class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms
 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
  jlong time() { return _time; }
  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};
jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  generation_iterate(&tolgc_cl, false);
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t) retVal);)
    return 0;
  }
  return retVal;
}
C:\hotspot-69087d08d473\src\share\vm/memory/genCollectedHeap.hpp
#ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"
class SubTasksDone;
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };
  friend class VM_PopulateDumpSharedSpace;
 protected:
  static GenCollectedHeap* _gch;
 private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;
  GenCollectorPolicy* _gen_policy;
  bool _incremental_collection_failed;
  unsigned int _full_collections_completed;
  SubTasksDone* _process_strong_tasks;
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)
protected:
  HeapWord* attempt_allocation(size_t size,
                               bool   is_tlab,
                               bool   first_only);
  void do_collection(bool   full,
                     bool   clear_all_soft_refs,
                     size_t size,
                     bool   is_tlab,
                     int    max_level);
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, int max_level);
  bool must_clear_all_soft_refs();
public:
  GenCollectedHeap(GenCollectorPolicy *policy);
  GCStats* gc_stats(int level) const;
  virtual jint initialize();
  char* allocate(size_t alignment,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);
  void post_initialize();
  virtual void ref_processing_init();
  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }
  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }
  size_t capacity() const;
  size_t used() const;
  void save_used_regions(int level);
  size_t max_capacity() const;
  HeapWord* mem_allocate(size_t size,
                         bool*  gc_overhead_limit_was_exceeded);
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
  virtual bool supports_heap_inspection() const { return true; }
  void collect(GCCause::Cause cause);
  void collect_locked(GCCause::Cause cause);
  void collect(GCCause::Cause cause, int max_level);
  bool is_in(const void* p) const;
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }
  bool is_in_young(oop p);
#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif
  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;
  virtual HeapWord* block_start(const void* addr) const;
  virtual size_t block_size(const HeapWord* addr) const;
  virtual bool block_is_obj(const HeapWord* addr) const;
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }
  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  void release_scratch();
  virtual void ensure_parsability(bool retire_tlabs);
  virtual jlong millis_since_last_gc();
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }
  unsigned int update_full_collections_completed();
  unsigned int update_full_collections_completed(unsigned int count);
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
  }
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
  }
  bool no_gc_in_progress() { return !is_gc_active(); }
  void prepare_for_verify();
  void verify(bool silent, VerifyOption option);
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;
  void print_heap_change(size_t prev_used) const;
  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };
  void generation_iterate(GenClosure* cl, bool old_to_young);
  void space_iterate(SpaceClosure* cl);
  virtual bool is_maximal_no_gc() const;
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    guarantee(l > 0, "Out of bounds");
    return _gens[l-1];
  }
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    guarantee(l < _n_gens, "Out of bounds");
    return _gens[l];
  }
  Generation* get_gen(int i) const {
    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
    return _gens[i];
  }
  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }
  static GenCollectedHeap* heap();
  void set_par_threads(uint t);
  void set_n_termination(uint t);
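  // The ScanningOption values below are bit flags; they may be OR-ed together
  // and are tested individually during root processing.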
  enum ScanningOption {
    SO_None                =  0x0,
    SO_AllCodeCache        =  0x8,
    SO_ScavengeCodeCache   = 0x10
  };
 private:
  void process_roots(bool activate_scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);
  void gen_process_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* weak_roots,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure,
                         CLDClosure* weak_cld_closure,
                         CodeBlobClosure* code_closure);
 public:
  static const bool StrongAndWeakRoots = false;
  static const bool StrongRootsOnly    = true;
  void gen_process_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure);
  void gen_process_weak_roots(OopClosure* root_closure);
  void save_marks();
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
  bool no_allocs_since_save_marks(int level);
  bool incremental_collection_will_fail(bool consult_young) {
    assert(heap()->collector_policy()->is_two_generation_policy(),
           "the following definition may not be suitable for an n(>2)-generation system");
    return incremental_collection_failed() ||
           (consult_young && !get_gen(0)->collection_attempt_is_safe());
  }
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);
private:
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )
  void check_for_non_bad_heap_word_value(HeapWord* addr,
    size_t size) PRODUCT_RETURN;
  void prepare_for_compaction();
  void collect_locked(GCCause::Cause cause, int max_level);
  bool create_cms_collector();
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);
  void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
};
#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/generation.cpp
#include "precompiled.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}
GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}
size_t Generation::used_stable() const {
  return used();
}
size_t Generation::max_capacity() const {
  return reserved().byte_size();
}
void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
void Generation::print() const { print_on(tty); }
void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
void Generation::print_summary_info() { print_summary_info_on(tty); }
void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}
class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};
class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};
bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}
size_t Generation::max_contiguous_available() const {
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}
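// Single-threaded promotion: first try a plain allocation in this (older)
// generation; if that fails, fall back to the heap's handle_failed_promotion(),
// which may expand the generation to make room.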
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT
  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  ShouldNotCallThis();
  return NULL;
}
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  guarantee(false, "No good general implementation.");
}
Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}
class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};
HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}
class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};
size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}
class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};
bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}
class GenerationOopIterateClosure : public SpaceClosure {
 public:
  ExtendedOopClosure* _cl;
  virtual void do_space(Space* s) {
    s->oop_iterate(_cl);
  }
  GenerationOopIterateClosure(ExtendedOopClosure* cl) :
    _cl(cl) {}
};
void Generation::oop_iterate(ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl);
  space_iterate(&blk);
}
void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}
class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};
void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}
class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};
void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}
void Generation::prepare_for_compaction(CompactPoint* cp) {
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}
class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};
void Generation::adjust_pointers() {
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
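// Expansion policy: try the preferred delta "expand_bytes" first (when it is
// larger than the request), then the exact request "bytes", and finally fall
// back to growing to the full reserved size. Returns true if any step
// succeeded.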
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
  return success;
}
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}
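// Resizes the generation after a collection so that free space stays between
// MinHeapFreeRatio and MaxHeapFreeRatio, shrinking only gradually via
// _shrink_factor. Worked example with illustrative numbers: MinHeapFreeRatio=40
// gives maximum_used_percentage = 0.6, so with used_after_gc = 60M the
// minimum_desired_capacity is 60M / 0.6 = 100M; a generation currently at 80M
// would therefore try to expand by 20M.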
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc   : %6.1fK"
                  "   used_after_gc   : %6.1fK"
                  "   capacity_after_gc   : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }
  if (capacity_after_gc < minimum_desired_capacity) {
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  _min_heap_delta_bytes: %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    _min_heap_delta_bytes / (double) K);
    }
    return;
  }
  size_t shrink_bytes = 0;
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  _capacity_at_prologue: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             _capacity_at_prologue / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");
    if (capacity_after_gc > maximum_desired_capacity) {
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      spec()->init_size() / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  _min_heap_delta_bytes: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      _min_heap_delta_bytes / (double) K);
      }
    }
  }
  if (capacity_after_gc > _capacity_at_prologue) {
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  aggressive shrinking:"
                             "  _capacity_at_prologue: %.1fK"
                             "  capacity_after_gc: %.1fK"
                             "  expansion_for_promotion: %.1fK"
                             "  shrink_bytes: %.1fK",
                             capacity_after_gc / (double) K,
                             _capacity_at_prologue / (double) K,
                             expansion_for_promotion / (double) K,
                             shrink_bytes / (double) K);
    }
  }
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}
void CardGeneration::prepare_for_verify() {}
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  SpecializationStats::clear();
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();
  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  gc_timer->register_gc_end();
  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  SpecializationStats::print();
}
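// In the parallel case the expand/allocate race is resolved under
// ParGCRareEvent_lock: expand, optionally delay (GCExpandToAllocateDelayMillis),
// retry the lock-free par_allocate(), and give up once less than the requested
// amount remains uncommitted.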
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}
size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}
size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}
size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}
MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}
size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}
size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    _bts->resize(new_word_size);
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
      (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }
    _the_space->set_end((HeapWord*)_virtual_space.high());
    update_counters();
    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  _virtual_space.shrink_by(bytes);
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  Universe::heap()->barrier_set()->resize_covered_region(mr);
  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
void OneContigSpaceCardGeneration::prepare_for_verify() {}
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}
void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}
void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}
bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}
ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}
void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}
void OneContigSpaceCardGeneration::verify() {
  the_space()->verify();
}
void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}
C:\hotspot-69087d08d473\src\share\vm/memory/generation.hpp
#ifndef SHARE_VM_MEMORY_GENERATION_HPP
#define SHARE_VM_MEMORY_GENERATION_HPP
#include "gc_implementation/shared/collectorCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/virtualspace.hpp"
class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // sizeof(ScratchBlock) == 2*sizeof(HeapWord)).
};
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
 protected:
  MemRegion _reserved;
  VirtualSpace _virtual_space;
  int _level;
  ReferenceProcessor* _ref_processor;
  CollectorCounters* _gc_counters;
  GCStats* _gc_stats;
  Generation* next_gen() const;
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
 public:
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };
  enum SomePublicConstants {
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }
  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.
  virtual size_t max_capacity() const;
  virtual size_t capacity_before_gc() const { return 0; }
  virtual size_t contiguous_available() const = 0;
  virtual size_t max_contiguous_available() const;
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
  virtual void promotion_failure_occurred() { /* does nothing */ }
  virtual size_t unsafe_max_alloc_nogc() const = 0;
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }
  MemRegion reserved() const { return _reserved; }
  virtual MemRegion used_region() const { return _reserved; }
  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void  save_used_region()   { _prev_used_region = used_region(); }
  virtual bool is_in(const void* p) const;
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }
  DefNewGeneration*  as_DefNewGeneration();
  virtual Space* space_containing(const void* addr) const;
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;
  virtual CompactibleSpace* first_compaction_space() const = 0;
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }
  virtual bool supports_inline_contig_alloc() const { return false; }
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual oop promote(oop obj, size_t obj_size);
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num) {}
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
  virtual bool full_collects_younger_generations() const { return false; }
  virtual bool performs_in_place_marking() const { return true; }
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;
  virtual void gc_prologue(bool full) {};
  virtual void gc_epilogue(bool full) {};
  virtual void record_spaces_top() {};
  virtual void ensure_parsability() {};
  virtual jlong time_of_last_gc(jlong now) {
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, (int64_t)_time_of_last_gc, (int64_t)now);
      }
    )
    return _time_of_last_gc;
  }
  virtual void update_time_of_last_gc(jlong now)  {
    _time_of_last_gc = now;
  }
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}
  virtual void prepare_for_compaction(CompactPoint* cp);
  virtual void adjust_pointers();
  virtual void compact();
  virtual void post_compact() {ShouldNotReachHere();}
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}
  virtual void prepare_for_verify() {};
  virtual void save_marks() {}
  virtual void reset_saved_marks() {}
  virtual bool no_allocs_since_save_marks() = 0;
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {    \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                      \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)
#undef Generation_SINCE_SAVE_MARKS_DECL
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
  virtual void reset_scratch() {};
  virtual void compute_new_size() = 0;
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;
  int level() const { return _level; }
  virtual bool must_be_youngest() const = 0;
  virtual bool must_be_oldest() const = 0;
  ReferenceProcessor* const ref_processor() { return _ref_processor; }
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
  virtual void clear_remembered_set() { }
  virtual void invalidate_remembered_set() { }
  virtual HeapWord* block_start(const void* addr) const;
  virtual size_t block_size(const HeapWord* addr) const ;
  virtual bool block_is_obj(const HeapWord* addr) const;
  void print_heap_change(size_t prev_used) const;
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void verify() = 0;
  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
private:
  StatRecord _stat_record;
public:
  StatRecord* stat_record() { return &_stat_record; }
  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};
class BlockOffsetSharedArray;
class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  GenRemSet* _rs;
  BlockOffsetSharedArray* _bts;
  size_t _shrink_factor;
  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  size_t _capacity_at_prologue;
  size_t _used_at_prologue;
  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);
 public:
  virtual bool expand(size_t bytes, size_t expand_bytes);
  virtual void shrink(size_t bytes) = 0;
  virtual void compute_new_size();
  virtual void clear_remembered_set();
  virtual void invalidate_remembered_set();
  virtual void prepare_for_verify();
  virtual bool grow_by(size_t bytes) = 0;
  virtual bool grow_to_reserved() = 0;
};
class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  friend class VM_PopulateDumpSharedSpace;
 protected:
  ContiguousSpace*  _the_space;       // actual space holding objects
  WaterMark  _last_gc;                // watermark between objects allocated before
  virtual bool grow_by(size_t bytes);
  virtual bool grow_to_reserved();
  void shrink_by(size_t bytes);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  ContiguousSpace* the_space() const { return _the_space; }
 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space)
  {}
  inline bool is_in(const void* p) const;
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  MemRegion used_region() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void younger_refs_iterate(OopsInGenClosure* blk);
  inline CompactibleSpace* first_compaction_space() const;
  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();
#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)      \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();
  inline size_t block_size(const HeapWord* addr) const;
  inline bool block_is_obj(const HeapWord* addr) const;
  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);
  virtual void prepare_for_verify();
  virtual void gc_epilogue(bool full);
  virtual void record_spaces_top();
  virtual void verify();
  virtual void print_on(outputStream* st) const;
};
#endif // SHARE_VM_MEMORY_GENERATION_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/generation.inline.hpp
#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_GENERATION_INLINE_HPP
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/space.hpp"
bool OneContigSpaceCardGeneration::is_in(const void* p) const {
  return the_space()->is_in(p);
}
WaterMark OneContigSpaceCardGeneration::top_mark() {
  return the_space()->top_mark();
}
CompactibleSpace*
OneContigSpaceCardGeneration::first_compaction_space() const {
  return the_space();
}
HeapWord* OneContigSpaceCardGeneration::allocate(size_t word_size,
                                                 bool is_tlab) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  return the_space()->allocate(word_size);
}
HeapWord* OneContigSpaceCardGeneration::par_allocate(size_t word_size,
                                                     bool is_tlab) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  return the_space()->par_allocate(word_size);
}
WaterMark OneContigSpaceCardGeneration::bottom_mark() {
  return the_space()->bottom_mark();
}
size_t OneContigSpaceCardGeneration::block_size(const HeapWord* addr) const {
  if (addr < the_space()->top()) return oop(addr)->size();
  else {
    assert(addr == the_space()->top(), "non-block head arg to block_size");
    return the_space()->_end - the_space()->top();
  }
}
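// A contiguous space has a single non-object block, [top, end); given a block
// boundary, everything strictly below top() is an allocated object.
// block_size() above relies on the same invariant.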
bool OneContigSpaceCardGeneration::block_is_obj(const HeapWord* addr) const {
  return addr < the_space()->top();
}
#endif // SHARE_VM_MEMORY_GENERATION_INLINE_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/generationSpec.cpp
#include "precompiled.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/filemap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/tenuredGeneration.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parNew/asParNewGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#endif // INCLUDE_ALL_GCS
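// Factory method: instantiates the concrete Generation named by this spec.
// Illustrative use (hypothetical sizes and names): a two-generation heap might
// build its young generation with
//
//   GenerationSpec spec(Generation::DefNew, 16*M, 64*M);
//   Generation* young = spec.init(young_rs, 0 /* level */, remset);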
Generation* GenerationSpec::init(ReservedSpace rs, int level,
                                 GenRemSet* remset) {
  switch (name()) {
    case Generation::DefNew:
      return new DefNewGeneration(rs, init_size(), level);
    case Generation::MarkSweepCompact:
      return new TenuredGeneration(rs, init_size(), level, remset);
#if INCLUDE_ALL_GCS
    case Generation::ParNew:
      return new ParNewGeneration(rs, init_size(), level);
    case Generation::ASParNew:
      return new ASParNewGeneration(rs,
                                    init_size(),
                                    init_size() /* min size */,
                                    level);
    case Generation::ConcurrentMarkSweep: {
      assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
      CardTableRS* ctrs = remset->as_CardTableRS();
      if (ctrs == NULL) {
        vm_exit_during_initialization("Rem set incompatibility.");
      }
      ConcurrentMarkSweepGeneration* g = NULL;
      g = new ConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
      g->initialize_performance_counters();
      return g;
    }
    case Generation::ASConcurrentMarkSweep: {
      assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
      CardTableRS* ctrs = remset->as_CardTableRS();
      if (ctrs == NULL) {
        vm_exit_during_initialization("Rem set incompatibility.");
      }
      ASConcurrentMarkSweepGeneration* g = NULL;
      g = new ASConcurrentMarkSweepGeneration(rs,
                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
      g->initialize_performance_counters();
      return g;
    }
#endif // INCLUDE_ALL_GCS
    default:
      guarantee(false, "unrecognized GenerationName");
      return NULL;
  }
}
C:\hotspot-69087d08d473\src\share\vm/memory/generationSpec.hpp
#ifndef SHARE_VM_MEMORY_GENERATIONSPEC_HPP
#define SHARE_VM_MEMORY_GENERATIONSPEC_HPP
#include "memory/generation.hpp"
class GenerationSpec : public CHeapObj<mtGC> {
  friend class VMStructs;
private:
  Generation::Name _name;
  size_t           _init_size;
  size_t           _max_size;
public:
  GenerationSpec(Generation::Name name, size_t init_size, size_t max_size) {
    _name = name;
    _init_size = init_size;
    _max_size = max_size;
  }
  Generation* init(ReservedSpace rs, int level, GenRemSet* remset);
  Generation::Name name()        const { return _name; }
  size_t init_size()             const { return _init_size; }
  void set_init_size(size_t size)      { _init_size = size; }
  size_t max_size()              const { return _max_size; }
  void set_max_size(size_t size)       { _max_size = size; }
  void align(size_t alignment) {
    set_init_size(align_size_up(init_size(), alignment));
    set_max_size(align_size_up(max_size(), alignment));
  }
  virtual int n_covered_regions() const { return 1; }
};
typedef GenerationSpec* GenerationSpecPtr;
#endif // SHARE_VM_MEMORY_GENERATIONSPEC_HPP
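The align() helper above just rounds both sizes up to the requested power-of-two alignment. A minimal standalone sketch of that arithmetic (align_size_up is re-implemented here for illustration; in HotSpot it comes from utilities/globalDefinitions.hpp):

#include <cstddef>
#include <cstdio>

// Round size up to the next multiple of alignment (alignment must be a power of two).
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  size_t init_size = 5 * 1024 * 1024 + 123;  // deliberately unaligned
  size_t alignment = 64 * 1024;              // e.g. a 64K space alignment
  printf("aligned init_size = %zu\n", align_size_up(init_size, alignment));
  return 0;
}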
C:\hotspot-69087d08d473\src\share\vm/memory/genMarkSweep.cpp
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
  guarantee(level == 1, "We always collect both old and young.");
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);
  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
  gch->trace_heap_before_gc(_gc_tracer);
  CodeCache::gc_prologue();
  Threads::gc_prologue();
  _total_invocations++;
  size_t gch_prev_used = gch->used();
  gch->save_used_regions(level);
  allocate_stacks();
  mark_sweep_phase1(level, clear_all_softrefs);
  mark_sweep_phase2();
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
  mark_sweep_phase3(level);
  mark_sweep_phase4();
  restore_marks();
  gch->save_marks();
  deallocate_stacks();
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && gch->get_gen(i)->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  Generation* old_gen = gch->get_gen(level);
  if (all_empty) {
    rs->clear_into_younger(old_gen);
  } else {
    rs->invalidate_or_clear(old_gen);
  }
  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();
  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }
  _ref_processor = NULL;
  Universe::update_heap_info_at_gc();
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);
  gch->trace_heap_after_gc(_gc_tracer);
}
void GenMarkSweep::allocate_stacks() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0);
  if (scratch != NULL) {
    _preserved_count_max =
      scratch->num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }
  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;
}
void GenMarkSweep::deallocate_stacks() {
  if (!UseG1GC) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    gch->release_scratch();
  }
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
void GenMarkSweep::mark_sweep_phase1(int level,
                                  bool clear_all_softrefs) {
  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  follow_root_closure.set_orig_generation(gch->get_gen(level));
  ClassLoaderDataGraph::clear_claimed_marks();
  gch->gen_process_roots(level,
                         false, // Younger gens are not roots.
                         true,  // activate StrongRootsScope
                         GenCollectedHeap::SO_None,
                         ClassUnloading,
                         &follow_root_closure,
                         &follow_root_closure,
                         &follow_cld_closure);
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }
  assert(_marking_stack.is_empty(), "Marking should have completed");
  bool purged_class = SystemDictionary::do_unloading(&is_alive);
  CodeCache::do_unloading(&is_alive, purged_class);
  Klass::clean_weak_klass_links(&is_alive);
  StringTable::unlink(&is_alive);
  SymbolTable::unlink();
  gc_tracer()->report_object_count_after_gc(&is_alive);
}
void GenMarkSweep::mark_sweep_phase2() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("2");
  gch->prepare_for_compaction();
}
class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure {
public:
  void do_generation(Generation* gen) {
    gen->adjust_pointers();
  }
};
void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");
  ClassLoaderDataGraph::clear_claimed_marks();
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
  gch->gen_process_roots(level,
                         false, // Younger gens are not roots.
                         true,  // activate StrongRootsScope
                         GenCollectedHeap::SO_AllCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &adjust_pointer_closure,
                         &adjust_pointer_closure,
                         &adjust_cld_closure);
  gch->gen_process_weak_roots(&adjust_pointer_closure);
  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}
class GenCompactClosure: public GenCollectedHeap::GenClosure {
public:
  void do_generation(Generation* gen) {
    gen->compact();
  }
};
void GenMarkSweep::mark_sweep_phase4() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("4");
  GenCompactClosure blk;
  gch->generation_iterate(&blk, true);
}
C:\hotspot-69087d08d473\src\share\vm/memory/genMarkSweep.hpp
#ifndef SHARE_VM_MEMORY_GENMARKSWEEP_HPP
#define SHARE_VM_MEMORY_GENMARKSWEEP_HPP
#include "gc_implementation/shared/markSweep.hpp"
class GenMarkSweep : public MarkSweep {
  friend class VM_MarkSweep;
  friend class G1MarkSweep;
 public:
  static void invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                  bool clear_all_softrefs);
 private:
  static void mark_sweep_phase1(int level, bool clear_all_softrefs);
  static void mark_sweep_phase2();
  static void mark_sweep_phase3(int level);
  static void mark_sweep_phase4();
  static void allocate_stacks();
  static void deallocate_stacks();
};
#endif // SHARE_VM_MEMORY_GENMARKSWEEP_HPP
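invoke_at_safepoint above drives four phases: mark live objects from roots, compute forwarding addresses, adjust pointers, and compact. A self-contained toy of the same LISP2-style sliding compaction, on an array "heap" (all types and the layout are invented here; nothing below is HotSpot API):

#include <cstdio>
#include <vector>

struct Obj {
  bool live = false;
  int  forward = -1;  // new index after compaction
  int  ref = -1;      // index of one referenced object, or -1
};

int main() {
  std::vector<Obj> heap(6);
  heap[0].ref = 3; heap[3].ref = 5;   // object graph: 0 -> 3 -> 5
  std::vector<int> roots = {0};

  // Phase 1: mark transitively from roots.
  for (int r : roots)
    for (int i = r; i != -1 && !heap[i].live; i = heap[i].ref)
      heap[i].live = true;

  // Phase 2: compute forwarding addresses (slide live objects left).
  int next = 0;
  for (size_t i = 0; i < heap.size(); i++)
    if (heap[i].live) heap[i].forward = next++;

  // Phase 3: adjust references (roots and fields) to the forwarded locations.
  for (int& r : roots) r = heap[r].forward;
  for (Obj& o : heap)
    if (o.live && o.ref != -1) o.ref = heap[o.ref].forward;

  // Phase 4: compact by copying each live object to its new slot.
  for (size_t i = 0; i < heap.size(); i++)
    if (heap[i].live && heap[i].forward != (int)i)
      heap[heap[i].forward] = heap[i];
  heap.resize(next);

  printf("live objects after compaction: %zu (root now at %d)\n",
         heap.size(), roots[0]);
  return 0;
}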
C:\hotspot-69087d08d473\src\share\vm/memory/genOopClosures.hpp
#ifndef SHARE_VM_MEMORY_GENOOPCLOSURES_HPP
#define SHARE_VM_MEMORY_GENOOPCLOSURES_HPP
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
class Generation;
class HeapWord;
class CardTableRS;
class CardTableModRefBS;
class DefNewGeneration;
class KlassRemSet;
template<class E, MEMFLAGS F, unsigned int N> class GenericTaskQueue;
typedef GenericTaskQueue<oop, mtGC, TASKQUEUE_SIZE> OopTaskQueue;
template<class T, MEMFLAGS F> class GenericTaskQueueSet;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
class OopsInGenClosure : public ExtendedOopClosure {
 private:
  Generation*  _orig_gen;     // generation originally set in ctor
  Generation*  _gen;          // generation being scanned
 protected:
  HeapWord*    _gen_boundary; // start of generation
  CardTableRS* _rs;           // remembered set
  Generation* generation() { return _gen; }
  CardTableRS* rs() { return _rs; }
  template <class T> void do_barrier(T* p);
  template <class T> void par_do_barrier(T* p);
 public:
  OopsInGenClosure() : ExtendedOopClosure(NULL),
    _orig_gen(NULL), _gen(NULL), _gen_boundary(NULL), _rs(NULL) {};
  OopsInGenClosure(Generation* gen);
  void set_generation(Generation* gen);
  void reset_generation() { _gen = _orig_gen; }
  void set_orig_generation(Generation* gen) {
    _orig_gen = gen;
    set_generation(gen);
  }
  HeapWord* gen_boundary() { return _gen_boundary; }
};
class OopsInKlassOrGenClosure: public OopsInGenClosure {
  Klass* _scanned_klass;
 public:
  OopsInKlassOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_klass(NULL) {}
  void set_scanned_klass(Klass* k) {
    assert(k == NULL || _scanned_klass == NULL, "Must be");
    _scanned_klass = k;
  }
  bool is_scanning_a_klass() { return _scanned_klass != NULL; }
  void do_klass_barrier();
};
class ScanClosure: public OopsInKlassOrGenClosure {
 protected:
  DefNewGeneration* _g;
  HeapWord*         _boundary;
  bool              _gc_barrier;
  template <class T> inline void do_oop_work(T* p);
 public:
  ScanClosure(DefNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
  Prefetch::style prefetch_style() {
    return Prefetch::do_write;
  }
};
class FastScanClosure: public OopsInKlassOrGenClosure {
 protected:
  DefNewGeneration* _g;
  HeapWord*         _boundary;
  bool              _gc_barrier;
  template <class T> inline void do_oop_work(T* p);
 public:
  FastScanClosure(DefNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
  Prefetch::style prefetch_style() {
    return Prefetch::do_write;
  }
};
class KlassScanClosure: public KlassClosure {
  OopsInKlassOrGenClosure* _scavenge_closure;
  bool                     _accumulate_modified_oops;
 public:
  KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                   KlassRemSet* klass_rem_set_policy);
  void do_klass(Klass* k);
};
class FilteringClosure: public ExtendedOopClosure {
 private:
  HeapWord*   _boundary;
  ExtendedOopClosure* _cl;
 protected:
  template <class T> inline void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if ((HeapWord*)obj < _boundary) {
        _cl->do_oop(p);
      }
    }
  }
 public:
  FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
    ExtendedOopClosure(cl->_ref_processor), _boundary(boundary),
    _cl(cl) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { FilteringClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
  virtual bool do_metadata()          { return do_metadata_nv(); }
  inline bool do_metadata_nv()        { assert(!_cl->do_metadata(), "assumption broken, must change to 'return _cl->do_metadata()'"); return false; }
};
class ScanWeakRefClosure: public OopClosure {
 protected:
  DefNewGeneration* _g;
  HeapWord*         _boundary;
  template <class T> inline void do_oop_work(T* p);
 public:
  ScanWeakRefClosure(DefNewGeneration* g);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
class VerifyOopClosure: public OopClosure {
 protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj)));
  }
 public:
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  static VerifyOopClosure verify_oop;
};
#endif // SHARE_VM_MEMORY_GENOOPCLOSURES_HPP
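FilteringClosure above only forwards references whose target lies below a boundary (i.e. in the young generation). A minimal standalone analogue of that wrapper pattern, using plain pointers instead of oops (all names invented for illustration):

#include <cstdio>

struct OopClosure {
  virtual void do_oop(int** p) = 0;
  virtual ~OopClosure() {}
};

struct PrintClosure : OopClosure {
  void do_oop(int** p) override { printf("visiting %p -> %p\n", (void*)p, (void*)*p); }
};

struct FilteringClosure : OopClosure {
  int* boundary; OopClosure* inner;
  FilteringClosure(int* b, OopClosure* c) : boundary(b), inner(c) {}
  void do_oop(int** p) override {
    if (*p != nullptr && *p < boundary) inner->do_oop(p);  // only "young" refs
  }
};

int main() {
  static int space[4];                 // pretend [0,2) is young, [2,4) is old
  int* fields[2] = { &space[0], &space[3] };
  PrintClosure print;
  FilteringClosure filter(&space[2], &print);
  for (int** f = fields; f != fields + 2; ++f) filter.do_oop(f);  // prints once
  return 0;
}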
C:\hotspot-69087d08d473\src\share\vm/memory/genOopClosures.inline.hpp
#ifndef SHARE_VM_MEMORY_GENOOPCLOSURES_INLINE_HPP
#define SHARE_VM_MEMORY_GENOOPCLOSURES_INLINE_HPP
#include "memory/cardTableRS.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
  ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {
  set_generation(gen);
}
inline void OopsInGenClosure::set_generation(Generation* gen) {
  _gen = gen;
  _gen_boundary = _gen->reserved().start();
  if (_rs == NULL) {
    GenRemSet* rs = SharedHeap::heap()->rem_set();
    assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind");
    _rs = (CardTableRS*)rs;
  }
}
template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
  assert(generation()->is_in_reserved(p), "expected ref in generation");
  T heap_oop = oopDesc::load_heap_oop(p);
  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  if ((HeapWord*)obj < _gen_boundary) {
    _rs->inline_write_ref_field_gc(p, obj);
  }
}
template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
  assert(generation()->is_in_reserved(p), "expected ref in generation");
  T heap_oop = oopDesc::load_heap_oop(p);
  assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  if ((HeapWord*)obj < gen_boundary()) {
    rs()->write_ref_field_gc_par(p, obj);
  }
}
inline void OopsInKlassOrGenClosure::do_klass_barrier() {
  assert(_scanned_klass != NULL, "Must be");
  _scanned_klass->record_modified_oops();
}
template <class T> inline void ScanClosure::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _g->copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (is_scanning_a_klass()) {
      do_klass_barrier();
    } else if (_gc_barrier) {
      do_barrier(p);
    }
  }
}
inline void ScanClosure::do_oop_nv(oop* p)       { ScanClosure::do_oop_work(p); }
inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); }
template <class T> inline void FastScanClosure::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _g->copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
      if (is_scanning_a_klass()) {
        do_klass_barrier();
      } else if (_gc_barrier) {
        do_barrier(p);
      }
    }
  }
}
inline void FastScanClosure::do_oop_nv(oop* p)       { FastScanClosure::do_oop_work(p); }
inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }
template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
  assert(!oopDesc::is_null(*p), "null weak reference?");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
  if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
    oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                      : _g->copy_to_survivor_space(obj);
    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
  }
}
inline void ScanWeakRefClosure::do_oop_nv(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
#endif // SHARE_VM_MEMORY_GENOOPCLOSURES_INLINE_HPP
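The core step in ScanClosure::do_oop_work above is idempotent copying: if the object is already forwarded, reuse the forwardee; otherwise copy it to the survivor space and install a forwarding pointer, then update the visited field in place. A toy standalone version of just that step (structures invented here, not HotSpot's):

#include <cstdio>

struct ToyObj { int payload; ToyObj* forwardee; };

ToyObj to_space[8];
int to_top = 0;

ToyObj* copy_to_survivor_space(ToyObj* obj) {
  ToyObj* copy = &to_space[to_top++];
  *copy = *obj;
  copy->forwardee = NULL;
  obj->forwardee = copy;        // forwarding pointer for later visitors
  return copy;
}

void scan_field(ToyObj** field) {
  ToyObj* obj = *field;
  if (obj == NULL) return;
  ToyObj* new_obj = obj->forwardee ? obj->forwardee
                                   : copy_to_survivor_space(obj);
  *field = new_obj;             // update the reference in place
}

int main() {
  ToyObj from = { 42, NULL };
  ToyObj* a = &from; ToyObj* b = &from;  // two fields referencing one object
  scan_field(&a);
  scan_field(&b);                        // second visit reuses the forwardee
  printf("copied once: %s, payload=%d\n", (a == b) ? "yes" : "no", a->payload);
  return 0;
}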
C:\hotspot-69087d08d473\src\share\vm/memory/genRemSet.cpp
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genRemSet.hpp"
uintx GenRemSet::max_alignment_constraint(Name nm) {
  assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
  return CardTableRS::ct_max_alignment_constraint();
}
class HasAccumulatedModifiedOopsClosure : public KlassClosure {
  bool _found;
 public:
  HasAccumulatedModifiedOopsClosure() : _found(false) {}
  void do_klass(Klass* klass) {
    if (_found) {
      return;
    }
    if (klass->has_accumulated_modified_oops()) {
      _found = true;
    }
  }
  bool found() {
    return _found;
  }
};
bool KlassRemSet::mod_union_is_clear() {
  HasAccumulatedModifiedOopsClosure closure;
  ClassLoaderDataGraph::classes_do(&closure);
  return !closure.found();
}
class ClearKlassModUnionClosure : public KlassClosure {
 public:
  void do_klass(Klass* klass) {
    if (klass->has_accumulated_modified_oops()) {
      klass->clear_accumulated_modified_oops();
    }
  }
};
void KlassRemSet::clear_mod_union() {
  ClearKlassModUnionClosure closure;
  ClassLoaderDataGraph::classes_do(&closure);
}
C:\hotspot-69087d08d473\src\share\vm/memory/genRemSet.hpp
#ifndef SHARE_VM_MEMORY_GENREMSET_HPP
#define SHARE_VM_MEMORY_GENREMSET_HPP
#include "oops/oop.hpp"
class Generation;
class BarrierSet;
class OopsInGenClosure;
class CardTableRS;
class KlassRemSet {
  bool _accumulate_modified_oops;
 public:
  KlassRemSet() : _accumulate_modified_oops(false) {}
  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
  bool mod_union_is_clear();
  void clear_mod_union();
};
class GenRemSet: public CHeapObj<mtGC> {
  friend class Generation;
  BarrierSet* _bs;
  KlassRemSet _klass_rem_set;
public:
  enum Name {
    CardTable,
    Other
  };
  GenRemSet(BarrierSet * bs) : _bs(bs) {}
  GenRemSet() : _bs(NULL) {}
  virtual Name rs_kind() = 0;
  virtual CardTableRS* as_CardTableRS() { return NULL; }
  BarrierSet* bs() { return _bs; }
  void set_bs(BarrierSet* bs) { _bs = bs; }
  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
  virtual void prepare_for_younger_refs_iterate(bool parallel) = 0;
  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0;
  virtual void younger_refs_in_space_iterate(Space* sp,
                                             OopsInGenClosure* cl) = 0;
  void write_ref_field_gc(void* field, oop new_val);
protected:
  virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
public:
  virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
  virtual void resize_covered_region(MemRegion new_region) = 0;
  virtual bool is_aligned(HeapWord* addr) = 0;
  static uintx max_alignment_constraint(Name nm);
  virtual void verify() = 0;
  virtual void verify_aligned_region_empty(MemRegion mr) = 0;
  virtual void print() {}
  virtual void clear(MemRegion mr) = 0;
  virtual void clear_into_younger(Generation* old_gen) = 0;
  virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0;
  virtual void invalidate_or_clear(Generation* old_gen) = 0;
};
#endif // SHARE_VM_MEMORY_GENREMSET_HPP
C:\hotspot-69087d08d473\src\share\vm/memory/genRemSet.inline.hpp
#ifndef SHARE_VM_MEMORY_GENREMSET_INLINE_HPP
#define SHARE_VM_MEMORY_GENREMSET_INLINE_HPP
inline void GenRemSet::write_ref_field_gc(void* field, oop new_val) {
  if (rs_kind() == CardTable) {
    ((CardTableRS*)this)->inline_write_ref_field_gc(field, new_val);
  } else {
    write_ref_field_gc_work(field, new_val);
  }
}
#endif // SHARE_VM_MEMORY_GENREMSET_INLINE_HPP
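The only concrete GenRemSet here is the card table (rs_kind() == CardTable): the old generation is divided into fixed-size cards, and the write barrier dirties the byte covering a written field so a later scavenge only scans dirty cards for old-to-young references. A standalone toy of that idea (sizes and names are illustrative; 512-byte cards match HotSpot's default):

#include <cstdio>
#include <cstring>

const size_t kCardShift = 9;                 // 512-byte cards
unsigned char card_table[16];                // covers 16 * 512 = 8K of "heap"
char heap[16 << kCardShift];

void write_ref(void* field) {                // post-write barrier
  size_t card = ((char*)field - heap) >> kCardShift;
  card_table[card] = 1;                      // dirty
}

int main() {
  memset(card_table, 0, sizeof(card_table));
  write_ref(&heap[100]);                     // dirties card 0
  write_ref(&heap[3000]);                    // dirties card 5
  for (size_t i = 0; i < sizeof(card_table); i++)
    if (card_table[i]) printf("card %zu is dirty\n", i);
  return 0;
}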
C:\hotspot-69087d08d473\src\share\vm/memory/guardedMemory.cpp
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/guardedMemory.hpp"
#include "runtime/os.hpp"
void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) {
  size_t total_sz = GuardedMemory::get_total_size(len);
  void* outerp = os::malloc(total_sz, mtInternal);
  if (outerp != NULL) {
    GuardedMemory guarded(outerp, len, tag);
    void* innerp = guarded.get_user_ptr();
    memcpy(innerp, ptr, len);
    return innerp;
  }
  return NULL; // OOM
}
bool GuardedMemory::free_copy(void* p) {
  if (p == NULL) {
    return true;
  }
  GuardedMemory guarded((u_char*)p);
  bool verify_ok = guarded.verify_guards();
  os::free(guarded.release_for_freeing());
  return verify_ok;
}
void GuardedMemory::print_on(outputStream* st) const {
  if (_base_addr == NULL) {
    st->print_cr("GuardedMemory(" PTR_FORMAT ") not associated to any memory", p2i(this));
    return;
  }
  st->print_cr("GuardedMemory(" PTR_FORMAT ") base_addr=" PTR_FORMAT
      " tag=" PTR_FORMAT " user_size=" SIZE_FORMAT " user_data=" PTR_FORMAT,
      p2i(this), p2i(_base_addr), p2i(get_tag()), get_user_size(), p2i(get_user_ptr()));
  Guard* guard = get_head_guard();
  st->print_cr("  Header guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
  guard = get_tail_guard();
  st->print_cr("  Trailer guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
  u_char udata = *get_user_ptr();
  switch (udata) {
  case uninitBlockPad:
    st->print_cr("  User data appears unused");
    break;
  case freeBlockPad:
    st->print_cr("  User data appears to have been freed");
    break;
  default:
    st->print_cr("  User data appears to be in use");
    break;
  }
}
#ifndef PRODUCT
#define GEN_PURPOSE_TAG ((void *) ((uintptr_t)0xf000f000))
static void guarded_memory_test_check(void* p, size_t sz, void* tag) {
  assert(p != NULL, "NULL pointer given to check");
  u_char* c = (u_char*) p;
  GuardedMemory guarded(c);
  assert(guarded.get_tag() == tag, "Tag is not the same as supplied");
  assert(guarded.get_user_ptr() == c, "User pointer is not the same as supplied");
  assert(guarded.get_user_size() == sz, "User size is not the same as supplied");
  assert(guarded.verify_guards(), "Guard broken");
}
void GuardedMemory::test_guarded_memory() {
  size_t total_sz = GuardedMemory::get_total_size(1);
  assert(total_sz > 1 && total_sz >= (sizeof(GuardHeader) + 1 + sizeof(Guard)), "Unexpected size");
  u_char* basep = (u_char*) os::malloc(total_sz, mtInternal);
  GuardedMemory guarded(basep, 1, GEN_PURPOSE_TAG);
  assert(*basep == badResourceValue, "Expected guard in the form of badResourceValue");
  u_char* userp = guarded.get_user_ptr();
  assert(*userp == uninitBlockPad, "Expected uninitialized data in the form of uninitBlockPad");
  guarded_memory_test_check(userp, 1, GEN_PURPOSE_TAG);
  void* freep = guarded.release_for_freeing();
  assert((u_char*)freep == basep, "Expected the same pointer the guard was wrapped around");
  assert(*userp == freeBlockPad, "Expected user data to be free block padded");
  assert(!guarded.verify_guards(), "Expected failed");
  os::free(freep);
  size_t sz = 0;
  do {
    void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
    void* up = guarded.wrap_with_guards(p, sz, (void*)1);
    memset(up, 0, sz);
    guarded_memory_test_check(up, sz, (void*)1);
    os::free(guarded.release_for_freeing());
    sz = (sz << 4) + 1;
  } while (sz < (256 * 1024));
  basep = (u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
  guarded.wrap_with_guards(basep, 1);
  assert(!guarded.verify_guards(), "Expected failure");
  os::free(basep);
  sz = 1;
  do {
    void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
    void* up = guarded.wrap_with_guards(p, sz, (void*)1);
    memset(up, 0, sz + 1); // Buffer-overwrite (within guard)
    assert(!guarded.verify_guards(), "Guard was not broken as expected");
    os::free(guarded.release_for_freeing());
    sz = (sz << 4) + 1;
  } while (sz < (256 * 1024));
  assert(GuardedMemory::free_copy(NULL), "Expected free NULL to be OK");
  const char* str = "Check my bounds out";
  size_t str_sz = strlen(str) + 1;
  char* str_copy = (char*) GuardedMemory::wrap_copy(str, str_sz);
  guarded_memory_test_check(str_copy, str_sz, NULL);
  assert(strcmp(str, str_copy) == 0, "Not identical copy");
  assert(GuardedMemory::free_copy(str_copy), "Free copy failed to verify");
  void* no_data = NULL;
  void* no_data_copy = GuardedMemory::wrap_copy(no_data, 0);
  assert(GuardedMemory::free_copy(no_data_copy), "Expected valid guards even for no data copy");
}
#endif // !PRODUCT
C:\hotspot-69087d08d473\src\share\vm/memory/guardedMemory.hpp
#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class GuardedMemory : StackObj { // Wrapper on stack
protected:
  class Guard { // Class for raw memory (no vtbl allowed)
    friend class GuardedMemory;
   protected:
    enum {
      GUARD_SIZE = 16
    };
    u_char _guard[GUARD_SIZE];
   public:
    void build() {
      u_char* c = _guard; // Possibly unaligned if tail guard
      u_char* end = c + GUARD_SIZE;
      while (c < end) {
        *c = badResourceValue; // lay down the expected guard pattern
        c++;
      }
    }
    bool verify() const {
      u_char* c = (u_char*) _guard;
      u_char* end = c + GUARD_SIZE;
      while (c < end) {
        if (*c != badResourceValue) {
          return false;
        }
        c++;
      }
      return true;
    }
  }; // GuardedMemory::Guard
  class GuardHeader : Guard {
    friend class GuardedMemory;
   protected:
    union {
      uintptr_t __unused_full_word1;
      size_t _user_size;
    };
    void* _tag;
   public:
    void set_user_size(const size_t usz) { _user_size = usz; }
    size_t get_user_size() const { return _user_size; }
    void set_tag(const void* tag) { _tag = (void*) tag; }
    void* get_tag() const { return _tag; }
  }; // GuardedMemory::GuardHeader
 protected:
  u_char* _base_addr;
 public:
  GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
    wrap_with_guards(base_ptr, user_size, tag);
  }
  GuardedMemory(void* userp) {
    u_char* user_ptr = (u_char*) userp;
    assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
    _base_addr = (user_ptr - sizeof(GuardHeader));
  }
  void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
    assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
    _base_addr = (u_char*)base_ptr;
    get_head_guard()->build();
    get_head_guard()->set_user_size(user_size);
    get_tail_guard()->build();
    set_tag(tag);
    set_user_bytes(uninitBlockPad);
    assert(verify_guards(), "Expected valid memory guards");
    return get_user_ptr();
  }
  bool verify_guards() const {
    if (_base_addr != NULL) {
      return (get_head_guard()->verify() && get_tail_guard()->verify());
    }
    return false;
  }
  void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }
  void* get_tag() const { return get_head_guard()->get_tag(); }
  size_t get_user_size() const {
    assert(_base_addr != NULL, "Not wrapping any memory");
    return get_head_guard()->get_user_size();
  }
  u_char* get_user_ptr() const {
    assert(_base_addr != NULL, "Not wrapping any memory");
    return _base_addr + sizeof(GuardHeader);
  }
  void* release_for_freeing() {
    set_user_bytes(freeBlockPad);
    return release();
  }
  void* release() {
    void* p = (void*) _base_addr;
    _base_addr = NULL;
    return p;
  }
  virtual void print_on(outputStream* st) const;
 protected:
  GuardHeader*  get_head_guard() const { return (GuardHeader*) _base_addr; }
  Guard*        get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); };
  void set_user_bytes(u_char ch) {
    memset(get_user_ptr(), ch, get_user_size());
  }
 public:
  static size_t get_total_size(size_t user_size) {
    size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
    assert(total_size > user_size, "Unexpected wrap-around");
    return total_size;
  }
  static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
  static bool free_copy(void* p);
#ifndef PRODUCT
  static void test_guarded_memory(void);
#endif
}; // GuardedMemory
#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
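GuardedMemory brackets the user block with header and trailer guards filled with a known byte pattern, then re-checks them on free to catch over- and underwrites. A minimal standalone sketch of the same guard-band technique (the layout, constants, and helper names below are invented, not GuardedMemory's actual layout):

#include <cstdio>
#include <cstdlib>
#include <cstring>

const size_t kGuard = 16;
const unsigned char kPattern = 0xAB;

// Layout: [user_size][head guard][user bytes][tail guard]
void* guarded_malloc(size_t n) {
  unsigned char* base = (unsigned char*)malloc(sizeof(size_t) + kGuard + n + kGuard);
  if (base == NULL) return NULL;
  memcpy(base, &n, sizeof(size_t));                              // remember user size
  memset(base + sizeof(size_t), kPattern, kGuard);               // head guard
  memset(base + sizeof(size_t) + kGuard + n, kPattern, kGuard);  // tail guard
  return base + sizeof(size_t) + kGuard;
}

bool guarded_free(void* p) {                 // returns false if a guard was broken
  unsigned char* user = (unsigned char*)p;
  unsigned char* base = user - kGuard - sizeof(size_t);
  size_t n; memcpy(&n, base, sizeof(size_t));
  bool ok = true;
  for (size_t i = 0; i < kGuard; i++)
    ok = ok && base[sizeof(size_t) + i] == kPattern   // head intact?
            && user[n + i] == kPattern;               // tail intact?
  free(base);
  return ok;
}

int main() {
  char* s = (char*)guarded_malloc(8);
  memcpy(s, "overrun!", 8);
  s[8] = 'X';                                // one-byte overwrite into the tail guard
  printf("guards intact: %s\n", guarded_free(s) ? "yes" : "no");
  return 0;
}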
C:\hotspot-69087d08d473\src\share\vm/memory/heap.cpp
#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}
CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
}
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  while (p < q) *p++ = 0xFF;
}
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  int i = 0;
  while (p < q) {
    *p++ = i++;             // backward distance to the block start; find_start() walks this chain
    if (i == 0xFF) i = 1;   // 0xFF is reserved to mark free segments
  }
}
static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    page_size = os::page_size_for_region_unaligned(reserved_size, 8);
  }
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }
  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }
  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
  clear();
  return true;
}
void CodeHeap::release() {
  Unimplemented();
}
bool CodeHeap::expand_by(size_t size) {
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if (ds > 0) {
      if (!_segmap.expand_by(ds)) return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}
void CodeHeap::shrink_by(size_t size) {
  Unimplemented();
}
void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
  debug_only(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  debug_only(if (VerifyCodeCacheOften) verify());
  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
#ifdef ASSERT
    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return block->allocated_space();
  }
  if (number_of_segments < CodeCacheMinBlockLength) {
    number_of_segments = CodeCacheMinBlockLength;
  }
  if (!is_critical) {
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      return NULL;
    }
  }
  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b =  block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
#ifdef ASSERT
    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
#endif
    return b->allocated_space();
  } else {
    return NULL;
  }
}
void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
#ifdef ASSERT
  memset((void *)b->allocated_space(),
         badCodeHeapFreeVal,
         segments_to_size(b->length()) - sizeof(HeapBlock));
#endif
  add_to_freelist(b);
  debug_only(if (VerifyCodeCacheOften) verify());
}
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t i = segment_for(p);
  address b = (address)_segmap.low();
  if (b[i] == 0xFF) {
    return NULL;
  }
  while (b[i] > 0) i -= (int)b[i];
  HeapBlock* h = block_at(i);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}
size_t CodeHeap::alignment_unit() const {
  return _segment_size;
}
size_t CodeHeap::alignment_offset() const {
  return sizeof(HeapBlock) & (_segment_size - 1);
}
void* CodeHeap::next_free(HeapBlock *b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}
HeapBlock *CodeHeap::block_start(void *q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}
size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}
size_t CodeHeap::allocated_capacity() const {
  return segments_to_size(_next_segment - _freelist_segments);
}
size_t CodeHeap::heap_unallocated_capacity() const {
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}
FreeBlock *CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");
  b->set_link(a->link());
  a->set_link(b);
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}
void CodeHeap::merge_right(FreeBlock *a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
  }
}
void CodeHeap::add_to_freelist(HeapBlock *a) {
  FreeBlock* b = (FreeBlock*)a;
  assert(b != _freelist, "cannot be removed twice");
  _freelist_segments += b->length();
  b->set_free();
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;
  while(cur != NULL && cur < b) {
    assert(prev == NULL || prev < cur, "must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert( (prev == NULL && b < _freelist) ||
          (prev < b && (cur == NULL || b < cur)), "list must be ordered");
  if (prev == NULL) {
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
  } else {
    insert_after(prev, b);
  }
}
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
  FreeBlock *best_block = NULL;
  FreeBlock *best_prev  = NULL;
  size_t best_length = 0;
  FreeBlock *prev = NULL;
  FreeBlock *cur = _freelist;
  while(cur != NULL) {
    size_t l = cur->length();
    if (l >= length && (best_block == NULL || best_length > l)) {
      if (!is_critical) {
        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
          break;
        }
      }
      best_block = cur;
      best_prev  = prev;
      best_length = best_block->length();
    }
    prev = cur;
    cur  = cur->link();
  }
  if (best_block == NULL) {
    return NULL;
  }
  assert((best_prev == NULL && _freelist == best_block ) ||
         (best_prev != NULL && best_prev->link() == best_block), "sanity check");
  if (best_length < length + CodeCacheMinBlockLength) {
    length = best_length;
    if (best_prev == NULL) {
      assert(_freelist == best_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      best_prev->set_link(best_block->link());
    }
  } else {
    best_block->set_length(best_length - length);
    best_block = following_block(best_block);
    size_t beg = segment_for(best_block);
    mark_segmap_as_used(beg, beg + length);
    best_block->set_length(length);
  }
  best_block->set_used();
  _freelist_segments -= length;
  return best_block;
}
#ifndef PRODUCT
void CodeHeap::print() {
  tty->print_cr("The Heap");
}
#endif
void CodeHeap::verify() {
  int count = 0;
  size_t len = 0;
  for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    len += b->length();
    count++;
  }
  static int free_block_threshold = 10000;
  if (count > free_block_threshold) {
    warning("CodeHeap: # of free blocks > %d", free_block_threshold);
    free_block_threshold *= 2;
  }
  for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
    if (h->free()) count--;
  }
}
C:\hotspot-69087d08d473\src\share\vm/memory/heap.hpp
#ifndef SHARE_VM_MEMORY_HEAP_HPP
#define SHARE_VM_MEMORY_HEAP_HPP
#include "memory/allocation.hpp"
#include "runtime/virtualspace.hpp"
class HeapBlock VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 public:
  struct Header {
    size_t  _length;                             // the length in segments
    bool    _used;                               // Used bit
  };
 protected:
  union {
    Header _header;
    int64_t _padding[ (sizeof(Header) + sizeof(int64_t)-1) / sizeof(int64_t) ];
  };
 public:
  void initialize(size_t length)                 { _header._length = length; set_used(); }
  void* allocated_space() const                  { return (void*)(this + 1); }
  size_t length() const                          { return _header._length; }
  void set_used()                                { _header._used = true; }
  void set_free()                                { _header._used = false; }
  bool free()                                    { return !_header._used; }
};
class FreeBlock: public HeapBlock {
  friend class VMStructs;
 protected:
  FreeBlock* _link;
 public:
  void initialize(size_t length)             { HeapBlock::initialize(length); _link= NULL; }
  void set_length(size_t l)                  { _header._length = l; }
  FreeBlock* link() const                    { return _link; }
  void set_link(FreeBlock* link)             { _link = link; }
};
class CodeHeap : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  VirtualSpace _memory;                          // the memory holding the blocks
  VirtualSpace _segmap;                          // the memory holding the segment map
  size_t       _number_of_committed_segments;
  size_t       _number_of_reserved_segments;
  size_t       _segment_size;
  int          _log2_segment_size;
  size_t       _next_segment;
  FreeBlock*   _freelist;
  size_t       _freelist_segments;               // No. of segments in freelist
  size_t   size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
  size_t   segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
  size_t   segment_for(void* p) const            { return ((char*)p - _memory.low()) >> _log2_segment_size; }
  HeapBlock* block_at(size_t i) const            { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }
  void  mark_segmap_as_free(size_t beg, size_t end);
  void  mark_segmap_as_used(size_t beg, size_t end);
  FreeBlock* following_block(FreeBlock *b);
  void insert_after(FreeBlock* a, FreeBlock* b);
  void merge_right (FreeBlock* a);
  void add_to_freelist(HeapBlock *b);
  FreeBlock* search_freelist(size_t length, bool is_critical);
  void*      next_free(HeapBlock* b) const;
  HeapBlock* first_block() const;
  HeapBlock* next_block(HeapBlock* b) const;
  HeapBlock* block_start(void* p) const;
  void on_code_mapping(char* base, size_t size);
 public:
  CodeHeap();
  bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
  void  release();                               // releases all allocated memory
  bool  expand_by(size_t size);                  // expands commited memory by size
  void  shrink_by(size_t size);                  // shrinks commited memory by size
  void  clear();                                 // clears all heap contents
  void* allocate  (size_t size, bool is_critical);  // allocates a block of size or returns NULL
  void  deallocate(void* p);                     // deallocates a block
  char* low_boundary() const                     { return _memory.low_boundary (); }
  char* high() const                             { return _memory.high(); }
  char* high_boundary() const                    { return _memory.high_boundary(); }
  bool  contains(const void* p) const            { return low_boundary() <= p && p < high(); }
  void* find_start(void* p) const;              // returns the block containing p or NULL
  size_t alignment_unit() const;                // alignment of any block
  size_t alignment_offset() const;              // offset of first byte of any block, within the enclosing alignment unit
  static size_t header_size();                  // returns the header size for each heap block
  void* first() const       { return next_free(first_block()); }
  void* next(void* p) const { return next_free(next_block(block_start(p))); }
  size_t capacity() const;
  size_t max_capacity() const;
  size_t allocated_capacity() const;
  size_t unallocated_capacity() const            { return max_capacity() - allocated_capacity(); }
private:
  size_t heap_unallocated_capacity() const;
public:
  void verify();
  void print()  PRODUCT_RETURN;
};
#endif // SHARE_VM_MEMORY_HEAP_HPP
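size_to_segments and segments_to_size above rely on _segment_size being a power of two: a byte size rounds up to whole segments, and the reverse conversion yields the committed byte size. A quick standalone check of that round-up arithmetic (values illustrative):

#include <cstdio>

int main() {
  const size_t segment_size = 64;          // must be a power of two
  const int    log2_segment = 6;
  size_t request = 130;                    // bytes
  size_t segs  = (request + segment_size - 1) >> log2_segment;  // size_to_segments
  size_t bytes = segs << log2_segment;                          // segments_to_size
  printf("%zu bytes -> %zu segments -> %zu bytes\n", request, segs, bytes);  // 130 -> 3 -> 192
  return 0;
}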
C:\hotspot-69087d08d473\src\share\vm/memory/heapInspection.cpp
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/heapInspection.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
int KlassInfoEntry::compare(KlassInfoEntry* e1, KlassInfoEntry* e2) {
  if(e1->_instance_words > e2->_instance_words) {
    return -1;
  } else if(e1->_instance_words < e2->_instance_words) {
    return 1;
  }
  ResourceMark rm;
  const char* name1 = e1->klass()->external_name();
  const char* name2 = e2->klass()->external_name();
  bool d1 = (name1[0] == '[');
  bool d2 = (name2[0] == '[');
  if (d1 && !d2) {
    return -1;
  } else if (d2 && !d1) {
    return 1;
  } else {
    return strcmp(name1, name2);
  }
}
const char* KlassInfoEntry::name() const {
  const char* name;
  if (_klass->name() != NULL) {
    name = _klass->external_name();
  } else {
    if (_klass == Universe::boolArrayKlassObj())         name = "<boolArrayKlass>";         else
    if (_klass == Universe::charArrayKlassObj())         name = "<charArrayKlass>";         else
    if (_klass == Universe::singleArrayKlassObj())       name = "<singleArrayKlass>";       else
    if (_klass == Universe::doubleArrayKlassObj())       name = "<doubleArrayKlass>";       else
    if (_klass == Universe::byteArrayKlassObj())         name = "<byteArrayKlass>";         else
    if (_klass == Universe::shortArrayKlassObj())        name = "<shortArrayKlass>";        else
    if (_klass == Universe::intArrayKlassObj())          name = "<intArrayKlass>";          else
    if (_klass == Universe::longArrayKlassObj())         name = "<longArrayKlass>";         else
      name = "<no name>";
  }
  return name;
}
void KlassInfoEntry::print_on(outputStream* st) const {
  ResourceMark rm;
  st->print_cr(INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13) "  %s",
               (jlong)  _instance_count,
               (julong) _instance_words * HeapWordSize,
               name());
}
KlassInfoEntry* KlassInfoBucket::lookup(Klass* const k) {
  KlassInfoEntry* elt = _list;
  while (elt != NULL) {
    if (elt->is_equal(k)) {
      return elt;
    }
    elt = elt->next();
  }
  elt = new (std::nothrow) KlassInfoEntry(k, list());
  if (elt != NULL) {
    set_list(elt);
  }
  return elt;
}
void KlassInfoBucket::iterate(KlassInfoClosure* cic) {
  KlassInfoEntry* elt = _list;
  while (elt != NULL) {
    cic->do_cinfo(elt);
    elt = elt->next();
  }
}
void KlassInfoBucket::empty() {
  KlassInfoEntry* elt = _list;
  _list = NULL;
  while (elt != NULL) {
    KlassInfoEntry* next = elt->next();
    delete elt;
    elt = next;
  }
}
void KlassInfoTable::AllClassesFinder::do_klass(Klass* k) {
  _table->lookup(k);
}
KlassInfoTable::KlassInfoTable(bool need_class_stats) {
  _size_of_instances_in_words = 0;
  _size = 0;
  _ref = (HeapWord*) Universe::boolArrayKlassObj();
  _buckets =
    (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_buckets != NULL) {
    _size = _num_buckets;
    for (int index = 0; index < _size; index++) {
      _buckets[index].initialize();
    }
    if (need_class_stats) {
      AllClassesFinder finder(this);
      ClassLoaderDataGraph::classes_do(&finder);
    }
  }
}
KlassInfoTable::~KlassInfoTable() {
  if (_buckets != NULL) {
    for (int index = 0; index < _size; index++) {
      _buckets[index].empty();
    }
    FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal);
    _size = 0;
  }
}
uint KlassInfoTable::hash(const Klass* p) {
  return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
}
KlassInfoEntry* KlassInfoTable::lookup(Klass* k) {
  uint         idx = hash(k) % _size;
  assert(_buckets != NULL, "Allocation failure should have been caught");
  KlassInfoEntry*  e   = _buckets[idx].lookup(k);
  assert(e == NULL || k == e->klass(), "must be equal");
  return e;
}
bool KlassInfoTable::record_instance(const oop obj) {
  Klass*        k = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  if (elt != NULL) {
    elt->set_count(elt->count() + 1);
    elt->set_words(elt->words() + obj->size());
    _size_of_instances_in_words += obj->size();
    return true;
  } else {
    return false;
  }
}
void KlassInfoTable::iterate(KlassInfoClosure* cic) {
  assert(_size == 0 || _buckets != NULL, "Allocation failure should have been caught");
  for (int index = 0; index < _size; index++) {
    _buckets[index].iterate(cic);
  }
}
size_t KlassInfoTable::size_of_instances_in_words() const {
  return _size_of_instances_in_words;
}
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  return (*e1)->compare(*e1,*e2);
}
KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title) :
  _cit(cit),
  _title(title) {
  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
}
KlassInfoHisto::~KlassInfoHisto() {
  delete _elements;
}
void KlassInfoHisto::add(KlassInfoEntry* cie) {
  elements()->append(cie);
}
void KlassInfoHisto::sort() {
  elements()->sort(KlassInfoHisto::sort_helper);
}
void KlassInfoHisto::print_elements(outputStream* st) const {
  jlong total = 0;
  julong totalw = 0;
  for(int i=0; i < elements()->length(); i++) {
    st->print("%4d: ", i+1);
    elements()->at(i)->print_on(st);
    total += elements()->at(i)->count();
    totalw += elements()->at(i)->words();
  }
  st->print_cr("Total " INT64_FORMAT_W(13) "  " UINT64_FORMAT_W(13),
               total, totalw * HeapWordSize);
}
#define MAKE_COL_NAME(field, name, help)     #name,
#define MAKE_COL_HELP(field, name, help)     help,
static const char *name_table[] = {
  HEAP_INSPECTION_COLUMNS_DO(MAKE_COL_NAME)
};
static const char *help_table[] = {
  HEAP_INSPECTION_COLUMNS_DO(MAKE_COL_HELP)
};
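KlassInfoEntry::compare above orders histogram entries by total instance words, descending, with array classes and then names as tie-breakers. A toy reproduction of the primary ordering (data invented for illustration):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry { const char* name; long words; };

int main() {
  std::vector<Entry> histo = { {"java.lang.String", 120},
                               {"[B",               560},
                               {"java.lang.Object",  40} };
  std::sort(histo.begin(), histo.end(),
            [](const Entry& a, const Entry& b) { return a.words > b.words; });
  for (size_t i = 0; i < histo.size(); i++)
    printf("%2zu: %6ld  %s\n", i + 1, histo[i].words, histo[i].name);
  return 0;
}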
