Java survivor spaces: why are S0C and S1C always smaller than S0CMX and S1CMX?
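The excerpt below is from HotSpot's DefNewGeneration (defNewGeneration.cpp): compute_new_size() decides how much of the new generation is committed, and compute_space_boundaries() splits that committed size between eden and the two survivor spaces. The answer to the question falls out of these two functions, as summarized after the second one.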

void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
                          SIZE_FORMAT "K [eden=" SIZE_FORMAT
                          "K,survivor=" SIZE_FORMAT "K]",
                          new_size_before/K, new_size_after/K,
                          eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                            thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
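As a rough illustration of the sizing math in compute_new_size() above, here is a small standalone C++ sketch that repeats the same steps with made-up inputs (none of the numbers or flag values come from a real JVM run; align_up stands in for HotSpot's align_size_up):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for HotSpot's align_size_up().
static std::size_t align_up(std::size_t n, std::size_t alignment) {
  return (n + alignment - 1) / alignment * alignment;
}

int main() {
  const std::size_t K = 1024, M = 1024 * K;

  // Assumed example inputs, not taken from a real JVM.
  std::size_t old_size        = 512 * M;  // next_gen->capacity()
  std::size_t new_ratio       = 2;        // -XX:NewRatio
  std::size_t threads_count   = 10;       // non-daemon threads
  std::size_t thread_increase = 16 * K;   // -XX:NewSizeThreadIncrease
  std::size_t alignment       = 64 * K;   // stand-in for Generation::GenGrain
  std::size_t min_new_size    = 64 * M;   // spec()->init_size()
  std::size_t max_new_size    = 256 * M;  // reserved().byte_size()

  // Same sequence of steps as compute_new_size().
  std::size_t desired = old_size / new_ratio + threads_count * thread_increase;
  desired = align_up(desired, alignment);
  desired = std::max(std::min(desired, max_new_size), min_new_size);

  // 512M/2 plus 160K rounds up past 256M, so the clamp leaves desired == 256M:
  // the committed size is only ever pushed toward the reserved maximum.
  std::printf("desired new size = %zuK\n", desired / K);
  return 0;
}

The point of the example: the desired size is derived from the old generation's capacity and then clamped to the reserved maximum, so the committed size can sit anywhere between the initial and maximum new-generation size.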

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the space are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top.  This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
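compute_survivor_size() is not part of the excerpt. In the HotSpot sources of this era it has roughly the following shape (a sketch from memory of defNewGeneration.hpp, not guaranteed verbatim): each survivor space gets about 1/(SurvivorRatio + 2) of the committed new-generation size, aligned down to the space alignment.

// Approximate shape of DefNewGeneration::compute_survivor_size (sketch, not verbatim).
size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
  size_t n = gen_size / (SurvivorRatio + 2);
  return n > alignment ? align_size_down(n, alignment) : alignment;
}

Putting the two functions together answers the question in the title: the survivor size is computed from _virtual_space.committed_size(), i.e. from the current size of the new generation, and that committed size only grows toward reserved().byte_size() (MaxNewSize) when compute_new_size() decides to expand. jstat's S0C and S1C report the current from()/to() capacities, while S0CMX and S1CMX (shown by jstat -gcnewcapacity) are derived from the maximum new-generation size. As long as the young generation is not committed at its maximum, S0C and S1C stay below S0CMX and S1CMX.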
