// One-time management initialization, called during VM startup.
// Starts the service thread and, when the ManagementServer flag is set,
// loads sun.management.Agent with the system class loader and invokes
// its static startAgent() method to bring up the management agent.
// TRAPS: may leave a pending exception (propagated via CHECK).
void Management::initialize(TRAPS) {
  // Start the service thread
  ServiceThread::initialize();

  if (ManagementServer) {
    ResourceMark rm(THREAD);
    HandleMark hm(THREAD);

    // Load and initialize the sun.management.Agent class
    // invoke startAgent method to start the management server
    Handle loader = Handle(THREAD, SystemDictionary::java_system_loader());
    Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_management_Agent(),
                                                 loader,
                                                 Handle(),
                                                 THREAD);
    if (k == NULL) {
      // The agent class is required when the management server was requested;
      // abort VM startup with a diagnostic message.
      vm_exit_during_initialization("Management agent initialization failure: "
          "class sun.management.Agent not found.");
    }
    instanceKlassHandle ik (THREAD, k);

    JavaValue result(T_VOID);
    JavaCalls::call_static(&result,
                           ik,
                           vmSymbols::startAgent_name(),
                           vmSymbols::void_method_signature(),
                           CHECK);
  }
}
// Copies the cached optional-support capability flags into the
// caller-supplied struct.
void Management::get_optional_support(jmmOptionalSupport* support) {
  memcpy(support, &_optional_support, sizeof(*support));
}
// Resolves the named class and runs its static initializer if it has not
// run yet.  Returns NULL with a pending exception on failure (CHECK_NULL).
Klass* Management::load_and_initialize_klass(Symbol* sh, TRAPS) {
  Klass* k = SystemDictionary::resolve_or_fail(sh, true, CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);
  if (ik->should_be_initialized()) {
    ik->initialize(CHECK_NULL);
  }
  // Callers cache the returned Klass* without tracing it in oops_do, so
  // the class must be defined by the NULL (boot) class loader; the assert
  // enforces that invariant.
  assert(ik->class_loader() == NULL, "need to follow in oops_do");
  return ik();
}
// Records VM creation begin/end times in the perf counters and then makes
// perf memory externally accessible.
void Management::record_vm_startup_time(jlong begin, jlong duration) {
  // If the counters were never created (early startup failure), do nothing.
  if (_begin_vm_creation_time == NULL) return;
  _begin_vm_creation_time->set_value(begin);
  _end_vm_creation_time->set_value(begin + duration);
  PerfMemory::set_accessible(true);
}
// Returns elapsed ticks since the Management base timestamp (_stamp).
jlong Management::timestamp() {
  TimeStamp t;
  t.update();
  return t.ticks() - _stamp.ticks();
}
// GC support: applies the closure to oops held by the memory and thread
// services on behalf of the management subsystem.
void Management::oops_do(OopClosure* f) {
  MemoryService::oops_do(f);
  ThreadService::oops_do(f);
}
// Lazily resolves and initializes java.lang.management.ThreadInfo, caching
// the result.  Returns NULL with a pending exception on failure.
Klass* Management::java_lang_management_ThreadInfo_klass(TRAPS) {
  if (_threadInfo_klass != NULL) {
    return _threadInfo_klass;
  }
  _threadInfo_klass = load_and_initialize_klass(vmSymbols::java_lang_management_ThreadInfo(), CHECK_NULL);
  return _threadInfo_klass;
}
// Lazily resolves and initializes java.lang.management.MemoryUsage, caching
// the result.  Returns NULL with a pending exception on failure.
Klass* Management::java_lang_management_MemoryUsage_klass(TRAPS) {
  if (_memoryUsage_klass != NULL) {
    return _memoryUsage_klass;
  }
  _memoryUsage_klass = load_and_initialize_klass(vmSymbols::java_lang_management_MemoryUsage(), CHECK_NULL);
  return _memoryUsage_klass;
}
// Lazily resolves and initializes java.lang.management.MemoryPoolMXBean,
// caching the result.  Returns NULL with a pending exception on failure.
Klass* Management::java_lang_management_MemoryPoolMXBean_klass(TRAPS) {
  if (_memoryPoolMXBean_klass != NULL) {
    return _memoryPoolMXBean_klass;
  }
  _memoryPoolMXBean_klass = load_and_initialize_klass(vmSymbols::java_lang_management_MemoryPoolMXBean(), CHECK_NULL);
  return _memoryPoolMXBean_klass;
}
// Lazily resolves and initializes java.lang.management.MemoryManagerMXBean,
// caching the result.  Returns NULL with a pending exception on failure.
Klass* Management::java_lang_management_MemoryManagerMXBean_klass(TRAPS) {
  if (_memoryManagerMXBean_klass != NULL) {
    return _memoryManagerMXBean_klass;
  }
  _memoryManagerMXBean_klass = load_and_initialize_klass(vmSymbols::java_lang_management_MemoryManagerMXBean(), CHECK_NULL);
  return _memoryManagerMXBean_klass;
}
// Lazily resolves and initializes
// java.lang.management.GarbageCollectorMXBean, caching the result.
// Returns NULL with a pending exception on failure.
Klass* Management::java_lang_management_GarbageCollectorMXBean_klass(TRAPS) {
  if (_garbageCollectorMXBean_klass != NULL) {
    return _garbageCollectorMXBean_klass;
  }
  _garbageCollectorMXBean_klass = load_and_initialize_klass(vmSymbols::java_lang_management_GarbageCollectorMXBean(), CHECK_NULL);
  return _garbageCollectorMXBean_klass;
}
// Lazily resolves and initializes sun.management.Sensor, caching the
// result.  Returns NULL with a pending exception on failure.
Klass* Management::sun_management_Sensor_klass(TRAPS) {
  if (_sensor_klass != NULL) {
    return _sensor_klass;
  }
  _sensor_klass = load_and_initialize_klass(vmSymbols::sun_management_Sensor(), CHECK_NULL);
  return _sensor_klass;
}
// Lazily resolves and initializes sun.management.ManagementFactory, caching
// the result.  Returns NULL with a pending exception on failure.
Klass* Management::sun_management_ManagementFactory_klass(TRAPS) {
  if (_managementFactory_klass != NULL) {
    return _managementFactory_klass;
  }
  _managementFactory_klass = load_and_initialize_klass(vmSymbols::sun_management_ManagementFactory(), CHECK_NULL);
  return _managementFactory_klass;
}
// Lazily resolves and initializes sun.management.GarbageCollectorImpl,
// caching the result.  Returns NULL with a pending exception on failure.
Klass* Management::sun_management_GarbageCollectorImpl_klass(TRAPS) {
  if (_garbageCollectorImpl_klass != NULL) {
    return _garbageCollectorImpl_klass;
  }
  _garbageCollectorImpl_klass = load_and_initialize_klass(vmSymbols::sun_management_GarbageCollectorImpl(), CHECK_NULL);
  return _garbageCollectorImpl_klass;
}
// Lazily resolves and initializes com.sun.management.GcInfo, caching the
// result.  Returns NULL with a pending exception on failure.
Klass* Management::com_sun_management_GcInfo_klass(TRAPS) {
  if (_gcInfo_klass != NULL) {
    return _gcInfo_klass;
  }
  _gcInfo_klass = load_and_initialize_klass(vmSymbols::com_sun_management_GcInfo(), CHECK_NULL);
  return _gcInfo_klass;
}
// Lazily resolves and initializes sun.management.DiagnosticCommandImpl,
// caching the result.  Returns NULL with a pending exception on failure.
Klass* Management::sun_management_DiagnosticCommandImpl_klass(TRAPS) {
  if (_diagnosticCommandImpl_klass != NULL) {
    return _diagnosticCommandImpl_klass;
  }
  _diagnosticCommandImpl_klass = load_and_initialize_klass(vmSymbols::sun_management_DiagnosticCommandImpl(), CHECK_NULL);
  return _diagnosticCommandImpl_klass;
}
// Lazily resolves and initializes sun.management.ManagementFactoryHelper,
// caching the result.  Returns NULL with a pending exception on failure.
Klass* Management::sun_management_ManagementFactoryHelper_klass(TRAPS) {
  if (_managementFactoryHelper_klass != NULL) {
    return _managementFactoryHelper_klass;
  }
  _managementFactoryHelper_klass = load_and_initialize_klass(vmSymbols::sun_management_ManagementFactoryHelper(), CHECK_NULL);
  return _managementFactoryHelper_klass;
}
// Pushes the constructor arguments common to both ThreadInfo constructors
// onto 'args'.  The push order IS the Java-side constructor signature:
// thread, status, blocker object, blocker owner, blocked count, blocked
// time, waited count, waited time, stack trace.  Do not reorder.
static void initialize_ThreadInfo_constructor_arguments(JavaCallArguments* args, ThreadSnapshot* snapshot, TRAPS) {
  Handle snapshot_thread(THREAD, snapshot->threadObj());

  jlong contended_time;
  jlong waited_time;
  if (ThreadService::is_thread_monitoring_contention()) {
    contended_time = Management::ticks_to_ms(snapshot->contended_enter_ticks());
    waited_time = Management::ticks_to_ms(snapshot->monitor_wait_ticks() + snapshot->sleep_ticks());
  } else {
    // When contention monitoring is off the times are unavailable;
    // max_julong stored in a jlong reads back as -1 on the Java side.
    contended_time = max_julong;
    waited_time = max_julong;
  }

  int thread_status = snapshot->thread_status();
  assert((thread_status & JMM_THREAD_STATE_FLAG_MASK) == 0, "Flags already set in thread_status in Thread object");
  // Fold the suspended / in-native flags into the status word.
  if (snapshot->is_ext_suspended()) {
    thread_status |= JMM_THREAD_STATE_FLAG_SUSPENDED;
  }
  if (snapshot->is_in_native()) {
    thread_status |= JMM_THREAD_STATE_FLAG_NATIVE;
  }

  ThreadStackTrace* st = snapshot->get_stack_trace();
  Handle stacktrace_h;
  if (st != NULL) {
    stacktrace_h = st->allocate_fill_stack_trace_element_array(CHECK);
  } else {
    stacktrace_h = Handle();  // no stack trace captured in this snapshot
  }

  args->push_oop(snapshot_thread);
  args->push_int(thread_status);
  args->push_oop(Handle(THREAD, snapshot->blocker_object()));
  args->push_oop(Handle(THREAD, snapshot->blocker_object_owner()));
  args->push_long(snapshot->contended_enter_count());
  args->push_long(contended_time);
  args->push_long(snapshot->monitor_wait_count() + snapshot->sleep_count());
  args->push_long(waited_time);
  args->push_oop(stacktrace_h);
}
// Creates a java.lang.management.ThreadInfo object from a thread snapshot
// using the constructor without lock information.  Returns NULL with a
// pending exception on failure.
instanceOop Management::create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS) {
  Klass* k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);

  JavaValue result(T_VOID);
  JavaCallArguments args(14);

  // Allocate the receiver first, then push constructor arguments.
  Handle element = ik->allocate_instance_handle(CHECK_NULL);
  args.push_oop(element);
  initialize_ThreadInfo_constructor_arguments(&args, snapshot, CHECK_NULL);

  // Invoke <init> with the no-locks ThreadInfo constructor signature.
  JavaCalls::call_special(&result,
                          ik,
                          vmSymbols::object_initializer_name(),
                          vmSymbols::java_lang_management_ThreadInfo_constructor_signature(),
                          &args,
                          CHECK_NULL);
  return (instanceOop) element();
}
// Creates a java.lang.management.ThreadInfo object from a thread snapshot,
// including the locked monitors (with their stack depths) and locked
// ownable synchronizers, via the "with locks" constructor.  Returns NULL
// with a pending exception on failure.
instanceOop Management::create_thread_info_instance(ThreadSnapshot* snapshot,
                                                    objArrayHandle monitors_array,
                                                    typeArrayHandle depths_array,
                                                    objArrayHandle synchronizers_array,
                                                    TRAPS) {
  Klass* k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);

  JavaValue result(T_VOID);
  JavaCallArguments args(17);

  // Allocate the receiver first, then push the shared constructor
  // arguments followed by the three lock-related arrays.
  Handle element = ik->allocate_instance_handle(CHECK_NULL);
  args.push_oop(element);
  initialize_ThreadInfo_constructor_arguments(&args, snapshot, CHECK_NULL);
  args.push_oop(monitors_array);
  args.push_oop(depths_array);
  args.push_oop(synchronizers_array);

  JavaCalls::call_special(&result,
                          ik,
                          vmSymbols::object_initializer_name(),
                          vmSymbols::java_lang_management_ThreadInfo_with_locks_constructor_signature(),
                          &args,
                          CHECK_NULL);
  return (instanceOop) element();
}
// Maps a jobject to the VM-side GCMemoryManager it represents.
// Throws NullPointerException for a NULL handle and
// IllegalArgumentException when the object is not a
// GarbageCollectorMXBean or does not correspond to a GC memory manager.
static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) {
  if (mgr == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
  }

  oop mgr_obj = JNIHandles::resolve(mgr);
  instanceHandle h(THREAD, (instanceOop) mgr_obj);

  Klass* k = Management::java_lang_management_GarbageCollectorMXBean_klass(CHECK_NULL);
  if (!h->is_a(k)) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "the object is not an instance of java.lang.management.GarbageCollectorMXBean class",
               NULL);
  }

  MemoryManager* gc = MemoryService::get_memory_manager(h);
  if (gc == NULL || !gc->is_gc_memory_manager()) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid GC memory manager",
               NULL);
  }
  // Safe downcast: is_gc_memory_manager() was checked above.
  return (GCMemoryManager*) gc;
}
// Maps a jobject to the VM-side MemoryPool it represents.  Throws
// NullPointerException for a NULL handle; returns NULL when the object
// does not correspond to a known pool.
static MemoryPool* get_memory_pool_from_jobject(jobject obj, TRAPS) {
  if (obj == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
  }

  oop pool_obj = JNIHandles::resolve(obj);
  assert(pool_obj->is_instance(), "Should be an instanceOop");
  instanceHandle ph(THREAD, (instanceOop) pool_obj);

  return MemoryService::get_memory_pool(ph);
}
#endif // INCLUDE_MANAGEMENT
// Verifies that every entry of the given long[] of thread ids is a
// positive value; throws IllegalArgumentException otherwise.
static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
  const int len = ids_ah->length();
  for (int idx = 0; idx < len; idx++) {
    if (ids_ah->long_at(idx) <= 0) {
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
                "Invalid thread ID entry");
    }
  }
}
#if INCLUDE_MANAGEMENT
// Verifies that infoArray is exactly a ThreadInfo[] (element class must
// match, not merely be assignable); throws IllegalArgumentException
// otherwise.
static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
  // check if the element of infoArray is of type ThreadInfo class
  Klass* threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
  Klass* element_klass = ObjArrayKlass::cast(infoArray_h->klass())->element_klass();
  if (element_klass != threadinfo_klass) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "infoArray element type is not ThreadInfo class");
  }
}
// Maps a jobject to the VM-side MemoryManager it represents.  Throws
// NullPointerException for a NULL handle; returns NULL when the object
// does not correspond to a known manager.
static MemoryManager* get_memory_manager_from_jobject(jobject obj, TRAPS) {
  if (obj == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), NULL);
  }

  oop mgr_obj = JNIHandles::resolve(obj);
  assert(mgr_obj->is_instance(), "Should be an instanceOop");
  instanceHandle mh(THREAD, (instanceOop) mgr_obj);

  return MemoryService::get_memory_manager(mh);
}
// Returns the JMM interface version implemented by this VM.
JVM_LEAF(jint, jmm_GetVersion(JNIEnv *env))
  return JMM_VERSION;
JVM_END
// Fills in the optional-support capability flags.  Returns 0 on success,
// -1 when the output pointer is NULL.
JVM_LEAF(jint, jmm_GetOptionalSupport(JNIEnv *env, jmmOptionalSupport* support))
  if (support == NULL) {
    return -1;
  }
  Management::get_optional_support(support);
  return 0;
JVM_END
// Returns all recorded VM flags and VM arguments joined into a single
// space-separated java.lang.String (flags first, then args), or NULL if
// there are none.
JVM_ENTRY(jobject, jmm_GetInputArguments(JNIEnv *env))
  ResourceMark rm(THREAD);

  if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
    return NULL;
  }

  char** vm_flags = Arguments::jvm_flags_array();
  char** vm_args = Arguments::jvm_args_array();
  int num_flags = Arguments::num_jvm_flags();
  int num_args = Arguments::num_jvm_args();

  // Compute the exact buffer size: all entry lengths, plus one separating
  // space between each pair of entries, plus the null terminator.
  size_t length = 1; // null terminator
  int i;
  for (i = 0; i < num_flags; i++) {
    length += strlen(vm_flags[i]);
  }
  for (i = 0; i < num_args; i++) {
    length += strlen(vm_args[i]);
  }
  // (num_flags + num_args - 1) separators; at least one entry exists here,
  // so this never underflows.
  length += num_flags + num_args - 1;

  char* args = NEW_RESOURCE_ARRAY(char, length);
  args[0] = '\0';
  // Concatenate all flags, space-separated.
  if (num_flags > 0) {
    strcat(args, vm_flags[0]);
    for (i = 1; i < num_flags; i++) {
      strcat(args, " ");
      strcat(args, vm_flags[i]);
    }
  }
  // Separator between the flag group and the arg group, if both exist.
  if (num_args > 0 && num_flags > 0) {
    strcat(args, " ");
  }
  // Concatenate all args, space-separated.
  if (num_args > 0) {
    strcat(args, vm_args[0]);
    for (i = 1; i < num_args; i++) {
      strcat(args, " ");
      strcat(args, vm_args[i]);
    }
  }

  Handle hargs = java_lang_String::create_from_platform_dependent_str(args, CHECK_NULL);
  return JNIHandles::make_local(env, hargs());
JVM_END
// Returns all recorded VM flags and VM arguments as a String[] (flags
// first, then args), or NULL if there are none.
JVM_ENTRY(jobjectArray, jmm_GetInputArgumentArray(JNIEnv *env))
  ResourceMark rm(THREAD);

  if (Arguments::num_jvm_args() == 0 && Arguments::num_jvm_flags() == 0) {
    return NULL;
  }

  char** vm_flags = Arguments::jvm_flags_array();
  char** vm_args = Arguments::jvm_args_array();
  int num_flags = Arguments::num_jvm_flags();
  int num_args = Arguments::num_jvm_args();

  instanceKlassHandle ik (THREAD, SystemDictionary::String_klass());
  objArrayOop r = oopFactory::new_objArray(ik(), num_args + num_flags, CHECK_NULL);
  objArrayHandle result_h(THREAD, r);

  // Fill flags first, then args; 'index' tracks the combined position.
  int index = 0;
  for (int j = 0; j < num_flags; j++, index++) {
    Handle h = java_lang_String::create_from_platform_dependent_str(vm_flags[j], CHECK_NULL);
    result_h->obj_at_put(index, h());
  }
  for (int i = 0; i < num_args; i++, index++) {
    Handle h = java_lang_String::create_from_platform_dependent_str(vm_args[i], CHECK_NULL);
    result_h->obj_at_put(index, h());
  }
  return (jobjectArray) JNIHandles::make_local(env, result_h());
JVM_END
// Returns an array of MemoryPoolMXBean instances: all pools in the VM
// when obj is NULL, otherwise only the pools managed by the memory
// manager that obj represents.
JVM_ENTRY(jobjectArray, jmm_GetMemoryPools(JNIEnv* env, jobject obj))
  ResourceMark rm(THREAD);

  int num_memory_pools;
  MemoryManager* mgr = NULL;
  if (obj == NULL) {
    num_memory_pools = MemoryService::num_memory_pools();
  } else {
    mgr = get_memory_manager_from_jobject(obj, CHECK_NULL);
    if (mgr == NULL) {
      return NULL;
    }
    num_memory_pools = mgr->num_memory_pools();
  }

  // Allocate the MemoryPoolMXBean[] result array.
  Klass* k = Management::java_lang_management_MemoryPoolMXBean_klass(CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);
  objArrayOop r = oopFactory::new_objArray(ik(), num_memory_pools, CHECK_NULL);
  objArrayHandle poolArray(THREAD, r);

  if (mgr == NULL) {
    // Fill with all memory pools known to the memory service.
    for (int i = 0; i < num_memory_pools; i++) {
      MemoryPool* pool = MemoryService::get_memory_pool(i);
      instanceOop p = pool->get_memory_pool_instance(CHECK_NULL);
      instanceHandle ph(THREAD, p);
      poolArray->obj_at_put(i, ph());
    }
  } else {
    // Fill with only the pools managed by the given manager.
    for (int i = 0; i < num_memory_pools; i++) {
      MemoryPool* pool = mgr->get_memory_pool(i);
      instanceOop p = pool->get_memory_pool_instance(CHECK_NULL);
      instanceHandle ph(THREAD, p);
      poolArray->obj_at_put(i, ph());
    }
  }
  return (jobjectArray) JNIHandles::make_local(env, poolArray());
JVM_END
// Returns an array of MemoryManagerMXBean instances: all managers in the
// VM when obj is NULL, otherwise only the managers of the memory pool
// that obj represents.
JVM_ENTRY(jobjectArray, jmm_GetMemoryManagers(JNIEnv* env, jobject obj))
  ResourceMark rm(THREAD);

  int num_mgrs;
  MemoryPool* pool = NULL;
  if (obj == NULL) {
    num_mgrs = MemoryService::num_memory_managers();
  } else {
    pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
    if (pool == NULL) {
      return NULL;
    }
    num_mgrs = pool->num_memory_managers();
  }

  // Allocate the MemoryManagerMXBean[] result array.
  Klass* k = Management::java_lang_management_MemoryManagerMXBean_klass(CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);
  objArrayOop r = oopFactory::new_objArray(ik(), num_mgrs, CHECK_NULL);
  objArrayHandle mgrArray(THREAD, r);

  if (pool == NULL) {
    // Fill with all memory managers known to the memory service.
    for (int i = 0; i < num_mgrs; i++) {
      MemoryManager* mgr = MemoryService::get_memory_manager(i);
      instanceOop p = mgr->get_memory_manager_instance(CHECK_NULL);
      instanceHandle ph(THREAD, p);
      mgrArray->obj_at_put(i, ph());
    }
  } else {
    // Fill with only the managers managing the given pool.
    for (int i = 0; i < num_mgrs; i++) {
      MemoryManager* mgr = pool->get_memory_manager(i);
      instanceOop p = mgr->get_memory_manager_instance(CHECK_NULL);
      instanceHandle ph(THREAD, p);
      mgrArray->obj_at_put(i, ph());
    }
  }
  return (jobjectArray) JNIHandles::make_local(env, mgrArray());
JVM_END
JVM_ENTRY(jobject, jmm_GetMemoryPoolUsage(JNIEnv* env, jobject obj))
ResourceMark rm(THREAD);
MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
if (pool != NULL) {
MemoryUsage usage = pool->get_memory_usage();
Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
return JNIHandles::make_local(env, h());
} else {
return NULL;
}
JVM_END
JVM_ENTRY(jobject, jmm_GetPeakMemoryPoolUsage(JNIEnv* env, jobject obj))
ResourceMark rm(THREAD);
MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
if (pool != NULL) {
MemoryUsage usage = pool->get_peak_memory_usage();
Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
return JNIHandles::make_local(env, h());
} else {
return NULL;
}
JVM_END
JVM_ENTRY(jobject, jmm_GetPoolCollectionUsage(JNIEnv* env, jobject obj))
ResourceMark rm(THREAD);
MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_NULL);
if (pool != NULL && pool->is_collected_pool()) {
MemoryUsage usage = pool->get_last_collection_usage();
Handle h = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
return JNIHandles::make_local(env, h());
} else {
return NULL;
}
JVM_END
// Associates a sun.management.Sensor object with a memory pool for the
// given threshold type (usage or collection usage).  Throws NPE for NULL
// arguments and IllegalArgumentException when sensorObj is not a Sensor.
JVM_ENTRY(void, jmm_SetPoolSensor(JNIEnv* env, jobject obj, jmmThresholdType type, jobject sensorObj))
  if (obj == NULL || sensorObj == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }

  Klass* sensor_klass = Management::sun_management_Sensor_klass(CHECK);
  oop s = JNIHandles::resolve(sensorObj);
  assert(s->is_instance(), "Sensor should be an instanceOop");
  instanceHandle sensor_h(THREAD, (instanceOop) s);
  if (!sensor_h->is_a(sensor_klass)) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Sensor is not an instance of sun.management.Sensor class");
  }

  MemoryPool* mpool = get_memory_pool_from_jobject(obj, CHECK);
  assert(mpool != NULL, "MemoryPool should exist");

  // High and low variants of each threshold share a single sensor slot.
  switch (type) {
    case JMM_USAGE_THRESHOLD_HIGH:
    case JMM_USAGE_THRESHOLD_LOW:
      mpool->set_usage_sensor_obj(sensor_h);
      break;
    case JMM_COLLECTION_USAGE_THRESHOLD_HIGH:
    case JMM_COLLECTION_USAGE_THRESHOLD_LOW:
      mpool->set_gc_usage_sensor_obj(sensor_h);
      break;
    default:
      assert(false, "Unrecognized type");
  }
JVM_END
// Sets the threshold of the given type for a memory pool and returns the
// previous threshold, or -1 when the pool does not support that threshold
// type.  Throws IllegalArgumentException for a negative threshold or one
// that does not fit in a size_t.
JVM_ENTRY(jlong, jmm_SetPoolThreshold(JNIEnv* env, jobject obj, jmmThresholdType type, jlong threshold))
  if (threshold < 0) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid threshold value",
               -1);
  }

  // Range-check BEFORE narrowing to size_t.  Compare as julong (threshold
  // is known non-negative here): the previous (size_t)threshold cast
  // truncated the jlong on 32-bit platforms, letting oversized values
  // slip past this check.  Also fixes the "Invalid valid" message typo.
  if ((julong) threshold > (julong) max_uintx) {
    stringStream st;
    st.print("Invalid threshold value. Threshold value (" UINT64_FORMAT ") > max value of size_t (" SIZE_FORMAT ")", (julong) threshold, max_uintx);
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), st.as_string(), -1);
  }

  MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_(0L));
  assert(pool != NULL, "MemoryPool should exist");

  jlong prev = 0;
  switch (type) {
    case JMM_USAGE_THRESHOLD_HIGH:
      if (!pool->usage_threshold()->is_high_threshold_supported()) {
        return -1;
      }
      prev = pool->usage_threshold()->set_high_threshold((size_t) threshold);
      break;

    case JMM_USAGE_THRESHOLD_LOW:
      if (!pool->usage_threshold()->is_low_threshold_supported()) {
        return -1;
      }
      prev = pool->usage_threshold()->set_low_threshold((size_t) threshold);
      break;

    case JMM_COLLECTION_USAGE_THRESHOLD_HIGH:
      if (!pool->gc_usage_threshold()->is_high_threshold_supported()) {
        return -1;
      }
      // Collection-usage thresholds return immediately: no low-memory
      // re-detection is triggered for them here.
      return pool->gc_usage_threshold()->set_high_threshold((size_t) threshold);

    case JMM_COLLECTION_USAGE_THRESHOLD_LOW:
      if (!pool->gc_usage_threshold()->is_low_threshold_supported()) {
        return -1;
      }
      return pool->gc_usage_threshold()->set_low_threshold((size_t) threshold);

    default:
      assert(false, "Unrecognized type");
      return -1;
  }

  // A changed usage threshold may alter which pools the low-memory
  // detector watches; recompute and check this pool right away.
  if (prev != threshold) {
    LowMemoryDetector::recompute_enabled_for_collected_pools();
    LowMemoryDetector::detect_low_memory(pool);
  }
  return prev;
JVM_END
// Returns a MemoryUsage object summing all heap pools (heap == true) or
// all non-heap pools (heap == false).  Any pool reporting an undefined
// ((size_t)-1) init or max size makes the corresponding total undefined.
JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
  ResourceMark rm(THREAD);

  // Calculate the memory usage
  size_t total_init = 0;
  size_t total_used = 0;
  size_t total_committed = 0;
  size_t total_max = 0;
  bool   has_undefined_init_size = false;
  bool   has_undefined_max_size = false;

  for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
    MemoryPool* pool = MemoryService::get_memory_pool(i);
    if ((heap && pool->is_heap()) || (!heap && pool->is_non_heap())) {
      MemoryUsage u = pool->get_memory_usage();
      total_used += u.used();
      total_committed += u.committed();

      // (size_t)-1 marks an undefined size; once seen, stop accumulating.
      if (u.init_size() == (size_t)-1) {
        has_undefined_init_size = true;
      }
      if (!has_undefined_init_size) {
        total_init += u.init_size();
      }

      if (u.max_size() == (size_t)-1) {
        has_undefined_max_size = true;
      }
      if (!has_undefined_max_size) {
        total_max += u.max_size();
      }
    }
  }

  // Propagate the undefined marker to the totals.
  if (has_undefined_init_size) {
    total_init = (size_t)-1;
  }
  if (has_undefined_max_size) {
    total_max = (size_t)-1;
  }

  // For the heap, report the configured initial size and the heap's
  // maximum capacity rather than the per-pool sums.
  MemoryUsage usage((heap ? InitialHeapSize : total_init),
                    total_used,
                    total_committed,
                    (heap ? Universe::heap()->max_capacity() : total_max));

  Handle obj = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
  return JNIHandles::make_local(env, obj());
JVM_END
// Returns the current value of a boolean management attribute; false (with
// a debug-build assert) for an unrecognized attribute.
JVM_LEAF(jboolean, jmm_GetBoolAttribute(JNIEnv *env, jmmBoolAttribute att))
  switch (att) {
  case JMM_VERBOSE_GC:
    return MemoryService::get_verbose();
  case JMM_VERBOSE_CLASS:
    return ClassLoadingService::get_verbose();
  case JMM_THREAD_CONTENTION_MONITORING:
    return ThreadService::is_thread_monitoring_contention();
  case JMM_THREAD_CPU_TIME:
    return ThreadService::is_thread_cpu_time_enabled();
  case JMM_THREAD_ALLOCATED_MEMORY:
    return ThreadService::is_thread_allocated_memory_enabled();
  default:
    assert(0, "Unrecognized attribute");
    return false;
  }
JVM_END
// Sets a boolean management attribute; returns the previous value as
// reported by the corresponding service setter, or false (with a
// debug-build assert) for an unrecognized attribute.
JVM_ENTRY(jboolean, jmm_SetBoolAttribute(JNIEnv *env, jmmBoolAttribute att, jboolean flag))
  switch (att) {
  case JMM_VERBOSE_GC:
    return MemoryService::set_verbose(flag != 0);
  case JMM_VERBOSE_CLASS:
    return ClassLoadingService::set_verbose(flag != 0);
  case JMM_THREAD_CONTENTION_MONITORING:
    return ThreadService::set_thread_monitoring_contention(flag != 0);
  case JMM_THREAD_CPU_TIME:
    return ThreadService::set_thread_cpu_time_enabled(flag != 0);
  case JMM_THREAD_ALLOCATED_MEMORY:
    return ThreadService::set_thread_allocated_memory_enabled(flag != 0);
  default:
    assert(0, "Unrecognized attribute");
    return false;
  }
JVM_END
// Returns the value of a per-collector long attribute for the given GC
// memory manager, or -1 for an unrecognized attribute.
static jlong get_gc_attribute(GCMemoryManager* mgr, jmmLongAttribute att) {
  switch (att) {
    case JMM_GC_TIME_MS:  return mgr->gc_time_ms();
    case JMM_GC_COUNT:    return mgr->gc_count();
    case JMM_GC_EXT_ATTRIBUTE_INFO_SIZE:
      // Only one GC-extension attribute is exported.
      return 1;
    default: {
      assert(0, "Unrecognized GC attribute");
      return -1;
    }
  }
}
// Thread-iteration closure that counts VM-internal threads (see
// do_thread for the exact filter); used by get_vm_thread_count().
class VmThreadCountClosure: public ThreadClosure {
 private:
  int _count;  // number of threads accepted so far
 public:
  VmThreadCountClosure() : _count(0) {};
  void do_thread(Thread* thread);
  int count() { return _count; }
};
// Counts every thread EXCEPT externally-visible Java threads; i.e. VM
// internal threads and Java threads hidden from external view are counted.
void VmThreadCountClosure::do_thread(Thread* thread) {
  // exclude externally visible JavaThreads
  if (thread->is_Java_thread() && !thread->is_hidden_from_external_view()) {
    return;
  }

  _count++;
}
// Returns the number of VM-internal threads (per VmThreadCountClosure's
// filter), walking the thread list under Threads_lock.
static jint get_vm_thread_count() {
  VmThreadCountClosure vmtcc;
  {
    // Hold Threads_lock only for the duration of the walk.
    MutexLockerEx ml(Threads_lock);
    Threads::threads_do(&vmtcc);
  }

  return vmtcc.count();
}
// Returns the number of VM flags exposed to management clients: flags that
// are unlocked, plus the unlocker flags themselves.
static jint get_num_flags() {
  // numFlags - 1: presumably skips a trailing sentinel entry in
  // Flag::flags — TODO confirm against the Flag table definition.
  int nFlags = (int) Flag::numFlags - 1;
  int count = 0;
  for (int i = 0; i < nFlags; i++) {
    Flag* flag = &Flag::flags[i];
    // Locked flags are hidden unless the flag itself unlocks others.
    if (flag->is_unlocked() || flag->is_unlocker()) {
      count++;
    }
  }
  return count;
}
// Returns the current value of a VM-wide long management attribute,
// dispatching to the owning service; -1 for an unrecognized attribute.
static jlong get_long_attribute(jmmLongAttribute att) {
  switch (att) {
  case JMM_CLASS_LOADED_COUNT:
    return ClassLoadingService::loaded_class_count();
  case JMM_CLASS_UNLOADED_COUNT:
    return ClassLoadingService::unloaded_class_count();
  case JMM_THREAD_TOTAL_COUNT:
    return ThreadService::get_total_thread_count();
  case JMM_THREAD_LIVE_COUNT:
    return ThreadService::get_live_thread_count();
  case JMM_THREAD_PEAK_COUNT:
    return ThreadService::get_peak_thread_count();
  case JMM_THREAD_DAEMON_COUNT:
    return ThreadService::get_daemon_thread_count();
  case JMM_JVM_INIT_DONE_TIME_MS:
    return Management::vm_init_done_time();
  case JMM_JVM_UPTIME_MS:
    // Uptime is derived from the elapsed tick counter, converted to ms.
    return Management::ticks_to_ms(os::elapsed_counter());
  case JMM_COMPILE_TOTAL_TIME_MS:
    return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
  case JMM_OS_PROCESS_ID:
    return os::current_process_id();
  case JMM_CLASS_LOADED_BYTES:
    return ClassLoadingService::loaded_class_bytes();
  case JMM_CLASS_UNLOADED_BYTES:
    return ClassLoadingService::unloaded_class_bytes();
  case JMM_SHARED_CLASS_LOADED_COUNT:
    return ClassLoadingService::loaded_shared_class_count();
  case JMM_SHARED_CLASS_UNLOADED_COUNT:
    return ClassLoadingService::unloaded_shared_class_count();
  case JMM_SHARED_CLASS_LOADED_BYTES:
    return ClassLoadingService::loaded_shared_class_bytes();
  case JMM_SHARED_CLASS_UNLOADED_BYTES:
    return ClassLoadingService::unloaded_shared_class_bytes();
  case JMM_TOTAL_CLASSLOAD_TIME_MS:
    return ClassLoader::classloader_time_ms();
  case JMM_VM_GLOBAL_COUNT:
    return get_num_flags();
  case JMM_SAFEPOINT_COUNT:
    return RuntimeService::safepoint_count();
  case JMM_TOTAL_SAFEPOINTSYNC_TIME_MS:
    return RuntimeService::safepoint_sync_time_ms();
  case JMM_TOTAL_STOPPED_TIME_MS:
    return RuntimeService::safepoint_time_ms();
  case JMM_TOTAL_APP_TIME_MS:
    return RuntimeService::application_time_ms();
  case JMM_VM_THREAD_COUNT:
    return get_vm_thread_count();
  case JMM_CLASS_INIT_TOTAL_COUNT:
    return ClassLoader::class_init_count();
  case JMM_CLASS_INIT_TOTAL_TIME_MS:
    return ClassLoader::class_init_time_ms();
  case JMM_CLASS_VERIFY_TOTAL_TIME_MS:
    return ClassLoader::class_verify_time_ms();
  case JMM_METHOD_DATA_SIZE_BYTES:
    return ClassLoadingService::class_method_data_size();
  case JMM_OS_MEM_TOTAL_PHYSICAL_BYTES:
    return os::physical_memory();
  default:
    return -1;
  }
}
// Returns a long attribute: VM-wide when obj is NULL, otherwise the
// per-GC-manager attribute of the manager obj represents.  Returns -1 for
// unknown attributes / managers.
JVM_ENTRY(jlong, jmm_GetLongAttribute(JNIEnv *env, jobject obj, jmmLongAttribute att))
  if (obj == NULL) {
    return get_long_attribute(att);
  } else {
    GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK_(0L));
    if (mgr != NULL) {
      return get_gc_attribute(mgr, att);
    }
  }
  return -1;
JVM_END
// Bulk variant of jmm_GetLongAttribute: fills result[i] for each
// requested attribute (VM-wide when obj is NULL, per-GC-manager
// otherwise) and returns the number of attributes found (result != -1).
JVM_ENTRY(jint, jmm_GetLongAttributes(JNIEnv *env,
                                      jobject obj,
                                      jmmLongAttribute* atts,
                                      jint count,
                                      jlong* result))
  int num_atts = 0;
  if (obj == NULL) {
    // VM-wide attributes
    for (int i = 0; i < count; i++) {
      result[i] = get_long_attribute(atts[i]);
      if (result[i] != -1) {
        num_atts++;
      }
    }
  } else {
    // Per-GC-manager attributes
    GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK_0);
    for (int i = 0; i < count; i++) {
      result[i] = get_gc_attribute(mgr, atts[i]);
      if (result[i] != -1) {
        num_atts++;
      }
    }
  }
  return num_atts;
JVM_END
// Dumps stack traces (and optionally locked monitors/synchronizers) for
// the threads named in ids_ah into dump_result, via a VM_ThreadDump
// safepoint operation.  Thread oops are resolved from tids under
// Threads_lock first so the VM op works on a stable handle list.
static void do_thread_dump(ThreadDumpResult* dump_result,
                           typeArrayHandle ids_ah,  // array of thread ID (long[])
                           int num_threads,
                           int max_depth,
                           bool with_locked_monitors,
                           bool with_locked_synchronizers,
                           TRAPS) {
  if (num_threads == 0) return;

  // Translate each thread id to its java.lang.Thread oop; a tid with no
  // live JavaThread yields a NULL-handle entry.
  GrowableArray<instanceHandle>* thread_handle_array = new GrowableArray<instanceHandle>(num_threads);
  {
    MutexLockerEx ml(Threads_lock);
    for (int i = 0; i < num_threads; i++) {
      jlong tid = ids_ah->long_at(i);
      JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
      oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
      instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
      thread_handle_array->append(threadObj_h);
    }
  }

  // Perform the actual dump at a safepoint.
  VM_ThreadDump op(dump_result,
                   thread_handle_array,
                   num_threads,
                   max_depth, /* stack depth */
                   with_locked_monitors,
                   with_locked_synchronizers);
  VMThread::execute(&op);
}
// Fills infoArray with ThreadInfo objects for the given thread ids.
// maxDepth == 0 skips stack traces entirely (no safepoint needed);
// maxDepth == -1 is the smallest accepted value and requests the full
// stack.  Ids with no live thread yield a NULL entry.  Returns 0 on
// success; throws NPE / IllegalArgumentException on bad arguments.
JVM_ENTRY(jint, jmm_GetThreadInfo(JNIEnv *env, jlongArray ids, jint maxDepth, jobjectArray infoArray))
  // Check if threads is null
  if (ids == NULL || infoArray == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), -1);
  }

  if (maxDepth < -1) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid maxDepth", -1);
  }

  ResourceMark rm(THREAD);
  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
  typeArrayHandle ids_ah(THREAD, ta);

  oop infoArray_obj = JNIHandles::resolve_non_null(infoArray);
  objArrayOop oa = objArrayOop(infoArray_obj);
  objArrayHandle infoArray_h(THREAD, oa);

  // validate the thread id array
  validate_thread_id_array(ids_ah, CHECK_0);

  // validate the ThreadInfo[] parameter
  validate_thread_info_array(infoArray_h, CHECK_0);

  // infoArray must match the thread id array in length.
  int num_threads = ids_ah->length();
  if (num_threads != infoArray_h->length()) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "The length of the given ThreadInfo array does not match the length of the given array of thread IDs", -1);
  }

  if (JDK_Version::is_gte_jdk16x_version()) {
    // Ensure AbstractOwnableSynchronizer is initialized before snapshots
    // are taken.
    java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_0);
  }

  // Snapshots are collected into a ThreadDumpResult so they can be
  // consumed after the lock / VM operation is released.
  ThreadDumpResult dump_result(num_threads);

  if (maxDepth == 0) {
    // No stack trace requested: snapshot each thread under Threads_lock,
    // without a stop-the-world thread dump.
    {
      MutexLockerEx ml(Threads_lock);
      for (int i = 0; i < num_threads; i++) {
        jlong tid = ids_ah->long_at(i);
        JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
        ThreadSnapshot* ts;
        if (jt == NULL) {
          // Thread does not exist or has terminated: create an empty
          // placeholder snapshot.
          ts = new ThreadSnapshot();
        } else {
          ts = new ThreadSnapshot(jt);
        }
        dump_result.add_thread_snapshot(ts);
      }
    }
  } else {
    // Stack traces requested: perform a full thread dump (VM operation).
    do_thread_dump(&dump_result,
                   ids_ah,
                   num_threads,
                   maxDepth,
                   false, /* no locked monitor */
                   false, /* no locked synchronizers */
                   CHECK_0);
  }

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must match the number of thread snapshots");
  int index = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; index++, ts = ts->next()) {
    // Placeholder snapshots (no thread oop) produce a NULL ThreadInfo.
    if (ts->threadObj() == NULL) {
      infoArray_h->obj_at_put(index, NULL);
      continue;
    }

    // Create a java.lang.management.ThreadInfo object for this snapshot.
    instanceOop info_obj = Management::create_thread_info_instance(ts, CHECK_0);
    infoArray_h->obj_at_put(index, info_obj);
  }
  return 0;
JVM_END
// Dumps ThreadInfo[] for the given thread ids (or for ALL threads when
// thread_ids is NULL), with stack traces up to maxDepth and, optionally,
// locked monitors (with depths) and locked ownable synchronizers.
// Snapshots without a thread oop produce NULL entries.
JVM_ENTRY(jobjectArray, jmm_DumpThreadsMaxDepth(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors,
                                                jboolean locked_synchronizers, jint maxDepth))
  ResourceMark rm(THREAD);

  if (JDK_Version::is_gte_jdk16x_version()) {
    // Ensure AbstractOwnableSynchronizer is initialized before snapshots
    // are taken.
    java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(CHECK_NULL);
  }

  typeArrayOop ta = typeArrayOop(JNIHandles::resolve(thread_ids));
  int num_threads = (ta != NULL ? ta->length() : 0);
  typeArrayHandle ids_ah(THREAD, ta);

  ThreadDumpResult dump_result(num_threads); // can safepoint

  if (ids_ah() != NULL) {
    // Dump only the requested threads.
    validate_thread_id_array(ids_ah, CHECK_NULL);

    do_thread_dump(&dump_result,
                   ids_ah,
                   num_threads,
                   maxDepth, /* stack depth */
                   (locked_monitors ? true : false), /* with locked monitors */
                   (locked_synchronizers ? true : false), /* with locked synchronizers */
                   CHECK_NULL);
  } else {
    // NULL thread id array: dump every thread via the all-threads VM op.
    VM_ThreadDump op(&dump_result,
                     maxDepth, /* stack depth */
                     (locked_monitors ? true : false), /* with locked monitors */
                     (locked_synchronizers ? true : false) /* with locked synchronizers */);
    VMThread::execute(&op);
  }

  int num_snapshots = dump_result.num_snapshots();

  // Allocate the ThreadInfo[] result array.
  Klass* k = Management::java_lang_management_ThreadInfo_klass(CHECK_NULL);
  instanceKlassHandle ik (THREAD, k);
  objArrayOop r = oopFactory::new_objArray(ik(), num_snapshots, CHECK_NULL);
  objArrayHandle result_h(THREAD, r);

  int index = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; ts = ts->next(), index++) {
    // Placeholder snapshots (no thread oop) produce a NULL entry.
    if (ts->threadObj() == NULL) {
      result_h->obj_at_put(index, NULL);
      continue;
    }

    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    assert(stacktrace != NULL, "Must have a stack trace dumped");

    // Total locked monitors = JNI-locked monitors plus the monitors
    // locked in each dumped stack frame.
    int num_frames = stacktrace->get_stack_depth();
    int num_locked_monitors = stacktrace->num_jni_locked_monitors();
    for (int i = 0; i < num_frames; i++) {
      StackFrameInfo* frame = stacktrace->stack_frame_at(i);
      num_locked_monitors += frame->num_locked_monitors();
    }

    objArrayHandle monitors_array;
    typeArrayHandle depths_array;
    objArrayHandle synchronizers_array;

    if (locked_monitors) {
      // Build parallel arrays: the monitor objects and the stack depth at
      // which each was locked (-1 for JNI-locked monitors).
      objArrayOop array = oopFactory::new_objArray(SystemDictionary::Object_klass(), num_locked_monitors, CHECK_NULL);
      objArrayHandle mh(THREAD, array);
      monitors_array = mh;

      typeArrayOop tarray = oopFactory::new_typeArray(T_INT, num_locked_monitors, CHECK_NULL);
      typeArrayHandle dh(THREAD, tarray);
      depths_array = dh;

      int count = 0;
      int j = 0;
      for (int depth = 0; depth < num_frames; depth++) {
        StackFrameInfo* frame = stacktrace->stack_frame_at(depth);
        int len = frame->num_locked_monitors();
        GrowableArray<oop>* locked_monitors = frame->locked_monitors();
        for (j = 0; j < len; j++) {
          oop monitor = locked_monitors->at(j);
          assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
          monitors_array->obj_at_put(count, monitor);
          depths_array->int_at_put(count, depth);
          count++;
        }
      }

      GrowableArray<oop>* jni_locked_monitors = stacktrace->jni_locked_monitors();
      for (j = 0; j < jni_locked_monitors->length(); j++) {
        oop object = jni_locked_monitors->at(j);
        assert(object != NULL && object->is_instance(), "must be a Java object");
        monitors_array->obj_at_put(count, object);
        // Monitor locked via JNI MonitorEnter has no frame: depth -1.
        depths_array->int_at_put(count, -1);
        count++;
      }
      assert(count == num_locked_monitors, "number of locked monitors doesn't match");
    }

    if (locked_synchronizers) {
      // Build an array of the ownable synchronizers this thread owns.
      assert(ts->threadObj() != NULL, "Must be a valid JavaThread");
      ThreadConcurrentLocks* tcl = ts->get_concurrent_locks();
      GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
      int num_locked_synchronizers = (locks != NULL ? locks->length() : 0);
      objArrayOop array = oopFactory::new_objArray(SystemDictionary::Object_klass(), num_locked_synchronizers, CHECK_NULL);
      objArrayHandle sh(THREAD, array);
      synchronizers_array = sh;

      for (int k = 0; k < num_locked_synchronizers; k++) {
        synchronizers_array->obj_at_put(k, locks->at(k));
      }
    }

    // Create java.lang.management.ThreadInfo object
    instanceOop info_obj = Management::create_thread_info_instance(ts,
                                                                   monitors_array,
                                                                   depths_array,
                                                                   synchronizers_array,
                                                                   CHECK_NULL);
    result_h->obj_at_put(index, info_obj);
  }

  return (jobjectArray) JNIHandles::make_local(env, result_h());
JVM_END
// Legacy entry point: dump threads with an unlimited stack depth.
JVM_ENTRY(jobjectArray, jmm_DumpThreads(JNIEnv *env, jlongArray thread_ids, jboolean locked_monitors,
                                        jboolean locked_synchronizers))
  return jmm_DumpThreadsMaxDepth(env, thread_ids, locked_monitors, locked_synchronizers, INT_MAX);
JVM_END
// Returns a Class[] containing the java.lang.Class mirrors of all loaded
// classes as enumerated by LoadedClassesEnumerator.
JVM_ENTRY(jobjectArray, jmm_GetLoadedClasses(JNIEnv *env))
  ResourceMark rm(THREAD);

  LoadedClassesEnumerator lce(THREAD);  // Pass current Thread as parameter

  int num_classes = lce.num_loaded_classes();
  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), num_classes, CHECK_0);
  objArrayHandle classes_ah(THREAD, r);

  for (int i = 0; i < num_classes; i++) {
    KlassHandle kh = lce.get_klass(i);
    oop mirror = kh()->java_mirror();
    classes_ah->obj_at_put(i, mirror);
  }

  return (jobjectArray) JNIHandles::make_local(env, classes_ah());
JVM_END
// Resets a VM statistic selected by 'type'.  'obj' is a jvalue union whose
// interpretation depends on the statistic: a thread id (obj.j) for the
// thread-contention statistics, or a jobject (obj.l, a memory pool or GC
// memory manager MXBean) for the pool-peak and GC statistics.
// Returns true if a statistic was reset, false otherwise.
// Change vs. original: removed an unreachable 'break;' that followed
// 'return true;' in the thread-contention case.
JVM_ENTRY(jboolean, jmm_ResetStatistic(JNIEnv *env, jvalue obj, jmmStatisticType type))
  ResourceMark rm(THREAD);
  switch (type) {
  case JMM_STAT_PEAK_THREAD_COUNT:
    ThreadService::reset_peak_thread_count();
    return true;
  case JMM_STAT_THREAD_CONTENTION_COUNT:
  case JMM_STAT_THREAD_CONTENTION_TIME: {
    jlong tid = obj.j;
    if (tid < 0) {
      THROW_(vmSymbols::java_lang_IllegalArgumentException(), JNI_FALSE);
    }
    // Hold Threads_lock so threads cannot exit while their stats are reset.
    MutexLockerEx ml(Threads_lock);
    if (tid == 0) {
      // tid == 0 means reset the statistic for all live Java threads.
      for (JavaThread* java_thread = Threads::first(); java_thread != NULL; java_thread = java_thread->next()) {
        if (type == JMM_STAT_THREAD_CONTENTION_COUNT) {
          ThreadService::reset_contention_count_stat(java_thread);
        } else {
          ThreadService::reset_contention_time_stat(java_thread);
        }
      }
    } else {
      JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid);
      if (java_thread == NULL) {
        // The thread has already terminated; nothing to reset.
        return false;
      }
      if (type == JMM_STAT_THREAD_CONTENTION_COUNT) {
        ThreadService::reset_contention_count_stat(java_thread);
      } else {
        ThreadService::reset_contention_time_stat(java_thread);
      }
    }
    return true;
  }
  case JMM_STAT_PEAK_POOL_USAGE: {
    jobject o = obj.l;
    if (o == NULL) {
      THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
    }
    oop pool_obj = JNIHandles::resolve(o);
    assert(pool_obj->is_instance(), "Should be an instanceOop");
    instanceHandle ph(THREAD, (instanceOop) pool_obj);
    MemoryPool* pool = MemoryService::get_memory_pool(ph);
    if (pool != NULL) {
      pool->reset_peak_memory_usage();
      return true;
    }
    break;
  }
  case JMM_STAT_GC_STAT: {
    jobject o = obj.l;
    if (o == NULL) {
      THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
    }
    GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(o, CHECK_0);
    if (mgr != NULL) {
      mgr->reset_gc_stat();
      return true;
    }
    break;
  }
  default:
    assert(0, "Unknown Statistic Type");
  }
  return false;
JVM_END
// Returns the CPU time consumed by the given thread, in the units produced
// by os::thread_cpu_time.  thread_id == 0 means the current thread.
// Returns -1 if CPU-time measurement is unsupported or the thread is gone;
// throws IllegalArgumentException for a negative id.
JVM_ENTRY(jlong, jmm_GetThreadCpuTime(JNIEnv *env, jlong thread_id))
  if (!os::is_thread_cpu_time_supported()) {
    return -1;
  }
  if (thread_id < 0) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid thread ID", -1);
  }
  JavaThread* java_thread = NULL;
  if (thread_id == 0) {
    // Current thread: no lock needed, it cannot exit under us.
    return os::current_thread_cpu_time();
  } else {
    // Threads_lock keeps the target thread from exiting during the query.
    MutexLockerEx ml(Threads_lock);
    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
    if (java_thread != NULL) {
      return os::thread_cpu_time((Thread*) java_thread);
    }
  }
  return -1;
JVM_END
// Returns a String[] with the names of all VM flags that are unlocked (or
// are unlocker flags) and not compiled out as constants.
JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
  // numFlags includes a trailing sentinel entry; exclude it.
  int nFlags = (int) Flag::numFlags - 1;
  // Allocate at the maximum possible size; may be trimmed below.
  objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                           nFlags, CHECK_0);
  objArrayHandle flags_ah(THREAD, r);
  int num_entries = 0;
  for (int i = 0; i < nFlags; i++) {
    Flag* flag = &Flag::flags[i];
    // Exclude flags that are constants in this binary (develop flags in product builds).
    if (flag->is_constant_in_binary()) {
      continue;
    }
    if (flag->is_unlocked() || flag->is_unlocker()) {
      Handle s = java_lang_String::create_from_str(flag->_name, CHECK_0);
      flags_ah->obj_at_put(num_entries, s());
      num_entries++;
    }
  }
  if (num_entries < nFlags) {
    // Some flags were filtered out; copy into a right-sized array.
    objArrayOop res = oopFactory::new_objArray(SystemDictionary::String_klass(), num_entries, CHECK_0);
    for(int i = 0; i < num_entries; i++) {
      res->obj_at_put(i, flags_ah->obj_at(i));
    }
    return (jobjectArray)JNIHandles::make_local(env, res);
  }
  return (jobjectArray)JNIHandles::make_local(env, flags_ah());
JVM_END
// Fills one jmmVMGlobal record from a Flag: name, current value, JMM type
// tag, writeability/external bits and origin.  'name' may be a pre-built
// String handle; if null, a String is created from the flag's name.
// Returns false for flag types that have no JMM mapping (type is set to
// JMM_VMGLOBAL_TYPE_UNKNOWN) or if String creation fails (CHECK_false).
bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
  Handle flag_name;
  if (name() == NULL) {
    // Caller did not supply the name; build it from the flag itself.
    flag_name = java_lang_String::create_from_str(flag->_name, CHECK_false);
  } else {
    flag_name = name;
  }
  global->name = (jstring)JNIHandles::make_local(env, flag_name());
  // Map the flag's native type to a JMM value/type pair.  Integral types
  // (intx, uintx, uint64_t) are all widened to jlong.
  if (flag->is_bool()) {
    global->value.z = flag->get_bool() ? JNI_TRUE : JNI_FALSE;
    global->type = JMM_VMGLOBAL_TYPE_JBOOLEAN;
  } else if (flag->is_intx()) {
    global->value.j = (jlong)flag->get_intx();
    global->type = JMM_VMGLOBAL_TYPE_JLONG;
  } else if (flag->is_uintx()) {
    global->value.j = (jlong)flag->get_uintx();
    global->type = JMM_VMGLOBAL_TYPE_JLONG;
  } else if (flag->is_uint64_t()) {
    global->value.j = (jlong)flag->get_uint64_t();
    global->type = JMM_VMGLOBAL_TYPE_JLONG;
  } else if (flag->is_double()) {
    global->value.d = (jdouble)flag->get_double();
    global->type = JMM_VMGLOBAL_TYPE_JDOUBLE;
  } else if (flag->is_ccstr()) {
    Handle str = java_lang_String::create_from_str(flag->get_ccstr(), CHECK_false);
    global->value.l = (jobject)JNIHandles::make_local(env, str());
    global->type = JMM_VMGLOBAL_TYPE_JSTRING;
  } else {
    global->type = JMM_VMGLOBAL_TYPE_UNKNOWN;
    return false;
  }
  global->writeable = flag->is_writeable();
  global->external = flag->is_external();
  // Translate the VM's flag-origin enum into the JMM origin constants.
  switch (flag->get_origin()) {
  case Flag::DEFAULT:
    global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
    break;
  case Flag::COMMAND_LINE:
    global->origin = JMM_VMGLOBAL_ORIGIN_COMMAND_LINE;
    break;
  case Flag::ENVIRON_VAR:
    global->origin = JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR;
    break;
  case Flag::CONFIG_FILE:
    global->origin = JMM_VMGLOBAL_ORIGIN_CONFIG_FILE;
    break;
  case Flag::MANAGEMENT:
    global->origin = JMM_VMGLOBAL_ORIGIN_MANAGEMENT;
    break;
  case Flag::ERGONOMIC:
    global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
    break;
  default:
    global->origin = JMM_VMGLOBAL_ORIGIN_OTHER;
  }
  return true;
}
// Fills 'globals' with up to 'count' VM flag records.  If 'names' is
// non-null it must be a String[]; only the named flags are looked up and
// entries for unknown names get a NULL name.  If 'names' is null, all
// unlocked (or unlocker) flags are returned.  Returns the number of
// entries actually written.
JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env,
                                 jobjectArray names,
                                 jmmVMGlobal *globals,
                                 jint count))
  if (globals == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
  }
  ResourceMark rm(THREAD);
  if (names != NULL) {
    // Lookup mode: resolve each requested flag name.
    objArrayOop ta = objArrayOop(JNIHandles::resolve_non_null(names));
    objArrayHandle names_ah(THREAD, ta);
    // Make sure the element type of names is String
    Klass* element_klass = ObjArrayKlass::cast(names_ah->klass())->element_klass();
    if (element_klass != SystemDictionary::String_klass()) {
      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
                 "Array element type is not String class", 0);
    }
    int names_length = names_ah->length();
    int num_entries = 0;
    for (int i = 0; i < names_length && i < count; i++) {
      oop s = names_ah->obj_at(i);
      if (s == NULL) {
        THROW_(vmSymbols::java_lang_NullPointerException(), 0);
      }
      Handle sh(THREAD, s);
      char* str = java_lang_String::as_utf8_string(s);
      Flag* flag = Flag::find_flag(str, strlen(str));
      if (flag != NULL &&
          add_global_entry(env, sh, &globals[i], flag, THREAD)) {
        num_entries++;
      } else {
        // Unknown flag or unmappable type: mark the slot empty.
        globals[i].name = NULL;
      }
    }
    return num_entries;
  } else {
    // Enumeration mode: walk the whole flag table.
    // numFlags includes a trailing sentinel entry; exclude it.
    int nFlags = (int) Flag::numFlags - 1;
    Handle null_h;
    int num_entries = 0;
    for (int i = 0; i < nFlags && num_entries < count; i++) {
      Flag* flag = &Flag::flags[i];
      // Exclude flags that are constants in this binary.
      if (flag->is_constant_in_binary()) {
        continue;
      }
      if ((flag->is_unlocked() || flag->is_unlocker()) &&
          add_global_entry(env, null_h, &globals[num_entries], flag, THREAD)) {
        num_entries++;
      }
    }
    return num_entries;
  }
JVM_END
// Sets a writeable (manageable) VM flag to a new value, recording the
// origin as Flag::MANAGEMENT.  Throws NullPointerException for a null
// name, IllegalArgumentException for unknown or non-writeable flags, and
// for heap-free-ratio values that fail validation.
JVM_ENTRY(void, jmm_SetVMGlobal(JNIEnv *env, jstring flag_name, jvalue new_value))
  ResourceMark rm(THREAD);
  oop fn = JNIHandles::resolve_external_guard(flag_name);
  if (fn == NULL) {
    THROW_MSG(vmSymbols::java_lang_NullPointerException(),
              "The flag name cannot be null.");
  }
  char* name = java_lang_String::as_utf8_string(fn);
  Flag* flag = Flag::find_flag(name, strlen(name));
  if (flag == NULL) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Flag does not exist.");
  }
  if (!flag->is_writeable()) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "This flag is not writeable.");
  }
  bool succeed = false;
  // Dispatch on the flag's native type; new_value carries the matching
  // jvalue member (z for bool, j for integral types, l for strings).
  if (flag->is_bool()) {
    bool bvalue = (new_value.z == JNI_TRUE ? true : false);
    succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT);
  } else if (flag->is_intx()) {
    intx ivalue = (intx)new_value.j;
    succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT);
  } else if (flag->is_uintx()) {
    uintx uvalue = (uintx)new_value.j;
    // Length 17 covers the 16 name characters plus the terminating NUL,
    // so these strncmp calls behave as exact (strcmp-style) matches.
    if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
      FormatBuffer<80> err_msg("%s", "");
      if (!Arguments::verify_MaxHeapFreeRatio(err_msg, uvalue)) {
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
      }
    } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) {
      FormatBuffer<80> err_msg("%s", "");
      if (!Arguments::verify_MinHeapFreeRatio(err_msg, uvalue)) {
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
      }
    }
    succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT);
  } else if (flag->is_uint64_t()) {
    uint64_t uvalue = (uint64_t)new_value.j;
    succeed = CommandLineFlags::uint64_tAtPut(name, &uvalue, Flag::MANAGEMENT);
  } else if (flag->is_ccstr()) {
    oop str = JNIHandles::resolve_external_guard(new_value.l);
    if (str == NULL) {
      THROW(vmSymbols::java_lang_NullPointerException());
    }
    ccstr svalue = java_lang_String::as_utf8_string(str);
    succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT);
    if (succeed) {
      // ccstrAtPut returns the previous value through svalue; release it.
      FREE_C_HEAP_ARRAY(char, svalue, mtInternal);
    }
  }
  assert(succeed, "Setting flag should succeed");
JVM_END
// ThreadClosure that collects names and CPU times of VM-internal threads.
// Names are copied to C-heap strings while Threads_lock is held (do_thread)
// and converted to java.lang.String afterwards (do_unlocked), since String
// allocation cannot be done inside the thread iteration.
class ThreadTimesClosure: public ThreadClosure {
 private:
  objArrayHandle _names_strings;  // destination String[] filled by do_unlocked()
  char **_names_chars;            // C-heap copies of thread names, owned by this closure
  typeArrayHandle _times;         // destination long[] of CPU times
  int _names_len;                 // capacity of the names array
  int _times_len;                 // capacity of the times array
  int _count;                     // number of threads recorded so far
 public:
  ThreadTimesClosure(objArrayHandle names, typeArrayHandle times);
  ~ThreadTimesClosure();
  virtual void do_thread(Thread* thread);
  void do_unlocked();
  int count() { return _count; }
};
// Constructor: caches the output arrays and their lengths and allocates
// the C-heap buffer that will hold copied thread names.
ThreadTimesClosure::ThreadTimesClosure(objArrayHandle names,
                                       typeArrayHandle times) {
  assert(names() != NULL, "names was NULL");
  assert(times() != NULL, "times was NULL");
  _names_strings = names;
  _names_len = names->length();
  _names_chars = NEW_C_HEAP_ARRAY(char*, _names_len, mtInternal);
  _times = times;
  _times_len = times->length();
  _count = 0;
}
// Records one thread's name and CPU time.  Called for every thread while
// Threads_lock is held (see jmm_GetInternalThreadTimes).
void ThreadTimesClosure::do_thread(Thread* thread) {
  assert(thread != NULL, "thread was NULL");
  // Skip externally-visible Java threads: this closure reports only
  // VM-internal threads (plus Java threads hidden from external view).
  if (thread->is_Java_thread() && !thread->is_hidden_from_external_view()) {
    return;
  }
  // Don't overflow the caller-supplied arrays.
  if (_count >= _names_len || _count >= _times_len) {
    return;
  }
  EXCEPTION_MARK;
  ResourceMark rm(THREAD); // thread->name() uses ResourceArea
  assert(thread->name() != NULL, "All threads should have a name");
  // Copy the name out of the ResourceArea; String creation is deferred to
  // do_unlocked(), after Threads_lock is released.
  _names_chars[_count] = strdup(thread->name());
  _times->long_at_put(_count, os::is_thread_cpu_time_supported() ?
                        os::thread_cpu_time(thread) : -1);
  _count++;
}
// Converts the saved C strings into java.lang.String objects.  Called by
// jmm_GetInternalThreadTimes after Threads_lock has been released,
// presumably because String allocation may block/GC — confirm with caller.
void ThreadTimesClosure::do_unlocked() {
  EXCEPTION_MARK;
  for (int i = 0; i < _count; i++) {
    Handle s = java_lang_String::create_from_str(_names_chars[i],  CHECK);
    _names_strings->obj_at_put(i, s());
  }
}
// Destructor: releases the strdup'ed name copies (malloc-allocated, hence
// free()) and the C-heap pointer array itself.
ThreadTimesClosure::~ThreadTimesClosure() {
  for (int i = 0; i < _count; i++) {
    free(_names_chars[i]);
  }
  FREE_C_HEAP_ARRAY(char *, _names_chars, mtInternal);
}
// Fills 'names' (String[]) and 'times' (long[]) with the names and CPU
// times of VM-internal threads; returns how many entries were written.
JVM_ENTRY(jint, jmm_GetInternalThreadTimes(JNIEnv *env,
                                           jobjectArray names,
                                           jlongArray times))
  if (names == NULL || times == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
  }
  ResourceMark rm(THREAD);
  objArrayOop na = objArrayOop(JNIHandles::resolve_non_null(names));
  objArrayHandle names_ah(THREAD, na);
  // Make sure the element type of names is String
  Klass* element_klass = ObjArrayKlass::cast(names_ah->klass())->element_klass();
  if (element_klass != SystemDictionary::String_klass()) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Array element type is not String class", 0);
  }
  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(times));
  typeArrayHandle times_ah(THREAD, ta);
  ThreadTimesClosure ttc(names_ah, times_ah);
  {
    // Iterate all threads under Threads_lock; only name copies and raw
    // times are gathered here (no allocation of Java objects).
    MutexLockerEx ml(Threads_lock);
    Threads::threads_do(&ttc);
  }
  // String creation happens after the lock is dropped.
  ttc.do_unlocked();
  return ttc.count();
JVM_END
// Runs a VM_FindDeadlocks safepoint operation and returns a Thread[]
// (as a Handle) containing every thread in every detected deadlock cycle,
// or an empty Handle if no deadlock was found.  When object_monitors_only
// is false, java.util.concurrent locks are checked as well.
static Handle find_deadlocks(bool object_monitors_only, TRAPS) {
  ResourceMark rm(THREAD);
  VM_FindDeadlocks op(!object_monitors_only /* also check concurrent locks? */);
  VMThread::execute(&op);
  DeadlockCycle* deadlocks = op.result();
  if (deadlocks == NULL) {
    // no deadlock found and return
    return Handle();
  }
  // First pass: count threads across all cycles to size the array.
  int num_threads = 0;
  DeadlockCycle* cycle;
  for (cycle = deadlocks; cycle != NULL; cycle = cycle->next()) {
    num_threads += cycle->num_threads();
  }
  objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NH);
  objArrayHandle threads_ah(THREAD, r);
  // Second pass: flatten all cycles into the result array.
  int index = 0;
  for (cycle = deadlocks; cycle != NULL; cycle = cycle->next()) {
    GrowableArray<JavaThread*>* deadlock_threads = cycle->threads();
    int len = deadlock_threads->length();
    for (int i = 0; i < len; i++) {
      threads_ah->obj_at_put(index, deadlock_threads->at(i)->threadObj());
      index++;
    }
  }
  return threads_ah;
}
// Finds threads deadlocked on monitors or (optionally) concurrent locks.
JVM_ENTRY(jobjectArray, jmm_FindDeadlockedThreads(JNIEnv *env, jboolean object_monitors_only))
  Handle result = find_deadlocks(object_monitors_only != 0, CHECK_0);
  return (jobjectArray) JNIHandles::make_local(env, result());
JVM_END
// Legacy entry point: deadlock detection over object monitors only.
JVM_ENTRY(jobjectArray, jmm_FindMonitorDeadlockedThreads(JNIEnv *env))
  Handle result = find_deadlocks(true, CHECK_0);
  return (jobjectArray) JNIHandles::make_local(env, result());
JVM_END
// Describes the GC extension attributes supported for a GC memory manager.
// Currently exactly one attribute is exposed ("GcThreadCount", an int);
// returns the number of attribute descriptions written into 'info'.
JVM_ENTRY(jint, jmm_GetGCExtAttributeInfo(JNIEnv *env, jobject mgr, jmmExtAttributeInfo* info, jint count))
  // If count == 0, caller is only asking how this call behaves; nothing to fill.
  if (count == 0) {
    return 0;
  }
  if (info == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
  }
  info[0].name = "GcThreadCount";
  info[0].type = 'I';
  info[0].description = "Number of GC threads";
  return 1;
JVM_END
// Validates that 'array' is a non-null MemoryUsage[] of exactly 'length'
// elements and returns it as an objArrayOop; throws NPE or
// IllegalArgumentException otherwise.
static objArrayOop get_memory_usage_objArray(jobjectArray array, int length, TRAPS) {
  if (array == NULL) {
    THROW_(vmSymbols::java_lang_NullPointerException(), 0);
  }
  objArrayOop oa = objArrayOop(JNIHandles::resolve_non_null(array));
  objArrayHandle array_h(THREAD, oa);
  // array_h must be of length 'length'
  if (length != array_h->length()) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "The length of the given MemoryUsage array does not match the number of memory pools.", 0);
  }
  // check if the element of array is of type MemoryUsage class
  Klass* usage_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_0);
  Klass* element_klass = ObjArrayKlass::cast(array_h->klass())->element_klass();
  if (element_klass != usage_klass) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "The element type is not MemoryUsage class", 0);
  }
  return array_h();
}
// Fills 'gc_stat' with the statistics of the most recent collection of the
// GC memory manager identified by 'obj': gc index, start/end times (ms),
// per-pool before/after MemoryUsage objects, and the GC-thread-count
// extension attribute.  Sets gc_index to 0 if no GC has occurred yet.
JVM_ENTRY(void, jmm_GetLastGCStat(JNIEnv *env, jobject obj, jmmGCStat *gc_stat))
  ResourceMark rm(THREAD);
  if (gc_stat->gc_ext_attribute_values_size > 0 && gc_stat->gc_ext_attribute_values == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  // Get the GCMemoryManager
  GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK);
  // Make a copy of the last GC statistics
  // GC may occur while constructing the last GC information
  int num_pools = MemoryService::num_memory_pools();
  GCStatInfo stat(num_pools);
  if (mgr->get_last_gc_stat(&stat) == 0) {
    // No GC has happened for this manager yet.
    gc_stat->gc_index = 0;
    return;
  }
  gc_stat->gc_index = stat.gc_index();
  gc_stat->start_time = Management::ticks_to_ms(stat.start_time());
  gc_stat->end_time = Management::ticks_to_ms(stat.end_time());
  gc_stat->num_gc_ext_attributes = 0;
  // Fill the arrays of MemoryUsage objects with before and after GC
  // per-pool memory usage
  objArrayOop bu = get_memory_usage_objArray(gc_stat->usage_before_gc,
                                             num_pools,
                                             CHECK);
  objArrayHandle usage_before_gc_ah(THREAD, bu);
  objArrayOop au = get_memory_usage_objArray(gc_stat->usage_after_gc,
                                             num_pools,
                                             CHECK);
  objArrayHandle usage_after_gc_ah(THREAD, au);
  for (int i = 0; i < num_pools; i++) {
    Handle before_usage = MemoryService::create_MemoryUsage_obj(stat.before_gc_usage_for_pool(i), CHECK);
    Handle after_usage;
    MemoryUsage u = stat.after_gc_usage_for_pool(i);
    if (u.max_size() == 0 && u.used() > 0) {
      // If max size == 0, this pool is a survivor space.
      // Set max size = -1 since the pools will be swapped after GC.
      MemoryUsage usage(u.init_size(), u.used(), u.committed(), (size_t)-1);
      after_usage = MemoryService::create_MemoryUsage_obj(usage, CHECK);
    } else {
      after_usage = MemoryService::create_MemoryUsage_obj(stat.after_gc_usage_for_pool(i), CHECK);
    }
    usage_before_gc_ah->obj_at_put(i, before_usage());
    usage_after_gc_ah->obj_at_put(i, after_usage());
  }
  if (gc_stat->gc_ext_attribute_values_size > 0) {
    // Current only 1 attribute (number of GC threads) supported
    gc_stat->gc_ext_attribute_values[0].i = mgr->num_gc_threads();
  }
JVM_END
// Enables or disables GC notification emission for one GC memory manager.
JVM_ENTRY(void, jmm_SetGCNotificationEnabled(JNIEnv *env, jobject obj, jboolean enabled))
  ResourceMark rm(THREAD);
  // Get the GCMemoryManager
  GCMemoryManager* mgr = get_gc_memory_manager_from_jobject(obj, CHECK);
  mgr->set_notification_enabled(enabled?true:false);
JVM_END
// Dumps the heap to the named file (hprof format via HeapDumper); when
// 'live' is true only reachable objects are dumped.  Returns 0 on success;
// throws NPE for a null file name and IOException if the dump fails.
// Compiled to a stub returning -1 when INCLUDE_SERVICES is off.
JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
#if INCLUDE_SERVICES
  ResourceMark rm(THREAD);
  oop on = JNIHandles::resolve_external_guard(outputfile);
  if (on == NULL) {
    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
               "Output file name cannot be null.", -1);
  }
  char* name = java_lang_String::as_platform_dependent_str(on, CHECK_(-1));
  if (name == NULL) {
    THROW_MSG_(vmSymbols::java_lang_NullPointerException(),
               "Output file name cannot be null.", -1);
  }
  HeapDumper dumper(live ? true : false);
  if (dumper.dump(name) != 0) {
    const char* errmsg = dumper.error_as_C_string();
    THROW_MSG_(vmSymbols::java_io_IOException(), errmsg, -1);
  }
  return 0;
#else  // INCLUDE_SERVICES
  return -1;
#endif // INCLUDE_SERVICES
JVM_END
// Returns a String[] of the names of all diagnostic commands exported to
// the MBean source.
JVM_ENTRY(jobjectArray, jmm_GetDiagnosticCommands(JNIEnv *env))
  ResourceMark rm(THREAD);
  GrowableArray<const char *>* dcmd_list = DCmdFactory::DCmd_list(DCmd_Source_MBean);
  objArrayOop cmd_array_oop = oopFactory::new_objArray(SystemDictionary::String_klass(),
                                                       dcmd_list->length(), CHECK_NULL);
  objArrayHandle cmd_array(THREAD, cmd_array_oop);
  for (int i = 0; i < dcmd_list->length(); i++) {
    oop cmd_name = java_lang_String::create_oop_from_str(dcmd_list->at(i), CHECK_NULL);
    cmd_array->obj_at_put(i, cmd_name);
  }
  return (jobjectArray) JNIHandles::make_local(env, cmd_array());
JVM_END
// Fills 'infoArray' with one dcmdInfo record (description, impact,
// permission, argument count, enabled bit) for each command name in the
// String[] 'cmds'.  infoArray is assumed to have at least cmds.length
// entries.  Throws NPE/IllegalArgumentException on bad input.
JVM_ENTRY(void, jmm_GetDiagnosticCommandInfo(JNIEnv *env, jobjectArray cmds,
                                             dcmdInfo* infoArray))
  if (cmds == NULL || infoArray == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  ResourceMark rm(THREAD);
  objArrayOop ca = objArrayOop(JNIHandles::resolve_non_null(cmds));
  objArrayHandle cmds_ah(THREAD, ca);
  // Make sure the element type of cmds is String
  Klass* element_klass = ObjArrayKlass::cast(cmds_ah->klass())->element_klass();
  if (element_klass != SystemDictionary::String_klass()) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Array element type is not String class");
  }
  GrowableArray<DCmdInfo *>* info_list = DCmdFactory::DCmdInfo_list(DCmd_Source_MBean);
  int num_cmds = cmds_ah->length();
  for (int i = 0; i < num_cmds; i++) {
    oop cmd = cmds_ah->obj_at(i);
    if (cmd == NULL) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(),
                "Command name cannot be null.");
    }
    char* cmd_name = java_lang_String::as_utf8_string(cmd);
    if (cmd_name == NULL) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(),
                "Command name cannot be null.");
    }
    // Look the command up by name in the exported DCmdInfo list.
    int pos = info_list->find((void*)cmd_name,DCmdInfo::by_name);
    if (pos == -1) {
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
                "Unknown diagnostic command");
    }
    DCmdInfo* info = info_list->at(pos);
    infoArray[i].name = info->name();
    infoArray[i].description = info->description();
    infoArray[i].impact = info->impact();
    JavaPermission p = info->permission();
    infoArray[i].permission_class = p._class;
    infoArray[i].permission_name = p._name;
    infoArray[i].permission_action = p._action;
    infoArray[i].num_arguments = info->num_arguments();
    infoArray[i].enabled = info->is_enabled();
  }
JVM_END
// Fills 'infoArray' with one dcmdArgInfo record per argument of the named
// diagnostic command.  infoArray is assumed to be large enough for the
// command's argument count (obtainable via jmm_GetDiagnosticCommandInfo).
JVM_ENTRY(void, jmm_GetDiagnosticCommandArgumentsInfo(JNIEnv *env,
          jstring command, dcmdArgInfo* infoArray))
  ResourceMark rm(THREAD);
  oop cmd = JNIHandles::resolve_external_guard(command);
  if (cmd == NULL) {
    THROW_MSG(vmSymbols::java_lang_NullPointerException(),
              "Command line cannot be null.");
  }
  char* cmd_name = java_lang_String::as_utf8_string(cmd);
  if (cmd_name == NULL) {
    THROW_MSG(vmSymbols::java_lang_NullPointerException(),
              "Command line content cannot be null.");
  }
  DCmd* dcmd = NULL;
  // Instantiate the command (resource-allocated) to query its arguments.
  DCmdFactory*factory = DCmdFactory::factory(DCmd_Source_MBean, cmd_name,
                                             strlen(cmd_name));
  if (factory != NULL) {
    dcmd = factory->create_resource_instance(NULL);
  }
  if (dcmd == NULL) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Unknown diagnostic command");
  }
  DCmdMark mark(dcmd);  // ensures the command instance is cleaned up
  GrowableArray<DCmdArgumentInfo*>* array = dcmd->argument_info_array();
  if (array->length() == 0) {
    return;
  }
  for (int i = 0; i < array->length(); i++) {
    infoArray[i].name = array->at(i)->name();
    infoArray[i].description = array->at(i)->description();
    infoArray[i].type = array->at(i)->type();
    infoArray[i].default_string = array->at(i)->default_string();
    infoArray[i].mandatory = array->at(i)->is_mandatory();
    infoArray[i].option = array->at(i)->is_option();
    infoArray[i].multiple = array->at(i)->is_multiple();
    infoArray[i].position = array->at(i)->position();
  }
  return;
JVM_END
// Parses and executes a diagnostic command line and returns its output as
// a java.lang.String.  Throws NPE for a null command line; any exception
// raised by command execution propagates via CHECK_NULL.
JVM_ENTRY(jstring, jmm_ExecuteDiagnosticCommand(JNIEnv *env, jstring commandline))
  ResourceMark rm(THREAD);
  oop cmd = JNIHandles::resolve_external_guard(commandline);
  if (cmd == NULL) {
    THROW_MSG_NULL(vmSymbols::java_lang_NullPointerException(),
                   "Command line cannot be null.");
  }
  char* cmdline = java_lang_String::as_utf8_string(cmd);
  if (cmdline == NULL) {
    THROW_MSG_NULL(vmSymbols::java_lang_NullPointerException(),
                   "Command line content cannot be null.");
  }
  // Collect command output in an in-memory stream, then convert to String.
  bufferedStream output;
  DCmd::parse_and_execute(DCmd_Source_MBean, &output, cmdline, ' ', CHECK_NULL);
  oop result = java_lang_String::create_oop_from_str(output.as_string(), CHECK_NULL);
  return (jstring) JNIHandles::make_local(env, result);
JVM_END
// Enables or disables JMX notifications from the diagnostic-command framework.
JVM_ENTRY(void, jmm_SetDiagnosticFrameworkNotificationEnabled(JNIEnv *env, jboolean enabled))
  DCmdFactory::set_jmx_notification_enabled(enabled?true:false);
JVM_END
// Converts elapsed-counter ticks to milliseconds.
// ticks / elapsed_frequency() gives seconds; multiplying by 1000.0 gives
// milliseconds.  The arithmetic is done in double to avoid 64-bit integer
// overflow for large tick counts, then truncated back to jlong.
// Fix vs. original: the return expression was truncated — the
// "* (double)1000.0);" factor and closing parenthesis were missing, which
// both broke compilation and dropped the seconds->milliseconds scaling.
jlong Management::ticks_to_ms(jlong ticks) {
  assert(os::elapsed_frequency() > 0, "Must be non-zero");
  return (jlong)(((double)ticks / (double)os::elapsed_frequency())
                 * (double)1000.0);
}
#endif // INCLUDE_MANAGEMENT
// Returns the cumulative bytes allocated by one thread (cooked counter).
// thread_id == 0 means the current thread; returns -1 if the thread does
// not exist; throws IllegalArgumentException for a negative id.
JVM_ENTRY(jlong, jmm_GetOneThreadAllocatedMemory(JNIEnv *env, jlong thread_id))
  if (thread_id < 0) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid thread ID", -1);
  }
  if (thread_id == 0) {
    // current thread
    if (THREAD->is_Java_thread()) {
      return ((JavaThread*)THREAD)->cooked_allocated_bytes();
    }
    return -1;
  }
  // Threads_lock keeps the target thread alive during the query.
  MutexLockerEx ml(Threads_lock);
  JavaThread* java_thread = Threads::find_java_thread_from_java_tid(thread_id);
  if (java_thread != NULL) {
    return java_thread->cooked_allocated_bytes();
  }
  return -1;
JVM_END
// Fills 'sizeArray' with the allocated-bytes counter for each thread id in
// 'ids'.  Entries whose thread no longer exists are left untouched.
JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
                                             jlongArray sizeArray))
  // Check if threads is null
  if (ids == NULL || sizeArray == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  ResourceMark rm(THREAD);
  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
  typeArrayHandle ids_ah(THREAD, ta);
  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
  typeArrayHandle sizeArray_h(THREAD, sa);
  // validate the thread id array
  validate_thread_id_array(ids_ah, CHECK);
  // sizeArray must be of the same length as the given array of thread IDs
  int num_threads = ids_ah->length();
  if (num_threads != sizeArray_h->length()) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "The length of the given long array does not match the length of "
              "the given array of thread IDs");
  }
  // Hold Threads_lock for the whole scan so threads cannot exit mid-query.
  MutexLockerEx ml(Threads_lock);
  for (int i = 0; i < num_threads; i++) {
    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
    if (java_thread != NULL) {
      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
    }
  }
JVM_END
// Like jmm_GetThreadCpuTime, but user_sys_cpu_time selects whether user+sys
// or user-only time is reported.  thread_id == 0 means the current thread.
JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
  if (!os::is_thread_cpu_time_supported()) {
    return -1;
  }
  if (thread_id < 0) {
    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
               "Invalid thread ID", -1);
  }
  JavaThread* java_thread = NULL;
  if (thread_id == 0) {
    // current thread
    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
  } else {
    // Threads_lock keeps the target thread from exiting during the query.
    MutexLockerEx ml(Threads_lock);
    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
    if (java_thread != NULL) {
      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
    }
  }
  return -1;
JVM_END
// Bulk variant of jmm_GetThreadCpuTimeWithKind: fills 'timeArray' with the
// CPU time of every thread id in 'ids'.  Entries whose thread no longer
// exists are left untouched.
JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
                                              jlongArray timeArray,
                                              jboolean user_sys_cpu_time))
  // Check if threads is null
  if (ids == NULL || timeArray == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  ResourceMark rm(THREAD);
  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
  typeArrayHandle ids_ah(THREAD, ta);
  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
  typeArrayHandle timeArray_h(THREAD, tia);
  // validate the thread id array
  validate_thread_id_array(ids_ah, CHECK);
  // timeArray must be of the same length as the given array of thread IDs
  int num_threads = ids_ah->length();
  if (num_threads != timeArray_h->length()) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "The length of the given long array does not match the length of "
              "the given array of thread IDs");
  }
  // Hold Threads_lock for the whole scan so threads cannot exit mid-query.
  MutexLockerEx ml(Threads_lock);
  for (int i = 0; i < num_threads; i++) {
    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
    if (java_thread != NULL) {
      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
                                                      user_sys_cpu_time != 0));
    }
  }
JVM_END
#if INCLUDE_MANAGEMENT
// The JMM function table handed to the JDK management libraries via
// Management::get_jmm_interface.  Slot order must match the
// jmmInterface_1_ declaration in services/jmm.h exactly.
const struct jmmInterface_1_ jmm_interface = {
  NULL,                               // reserved
  jmm_GetOneThreadAllocatedMemory,
  jmm_GetVersion,
  jmm_GetOptionalSupport,
  jmm_GetInputArguments,
  jmm_GetThreadInfo,
  jmm_GetInputArgumentArray,
  jmm_GetMemoryPools,
  jmm_GetMemoryManagers,
  jmm_GetMemoryPoolUsage,
  jmm_GetPeakMemoryPoolUsage,
  jmm_GetThreadAllocatedMemory,
  jmm_GetMemoryUsage,
  jmm_GetLongAttribute,
  jmm_GetBoolAttribute,
  jmm_SetBoolAttribute,
  jmm_GetLongAttributes,
  jmm_FindMonitorDeadlockedThreads,
  jmm_GetThreadCpuTime,
  jmm_GetVMGlobalNames,
  jmm_GetVMGlobals,
  jmm_GetInternalThreadTimes,
  jmm_ResetStatistic,
  jmm_SetPoolSensor,
  jmm_SetPoolThreshold,
  jmm_GetPoolCollectionUsage,
  jmm_GetGCExtAttributeInfo,
  jmm_GetLastGCStat,
  jmm_GetThreadCpuTimeWithKind,
  jmm_GetThreadCpuTimesWithKind,
  jmm_DumpHeap0,
  jmm_FindDeadlockedThreads,
  jmm_SetVMGlobal,
  jmm_DumpThreadsMaxDepth,
  jmm_DumpThreads,
  jmm_SetGCNotificationEnabled,
  jmm_GetDiagnosticCommands,
  jmm_GetDiagnosticCommandInfo,
  jmm_GetDiagnosticCommandArgumentsInfo,
  jmm_ExecuteDiagnosticCommand,
  jmm_SetDiagnosticFrameworkNotificationEnabled
};
#endif // INCLUDE_MANAGEMENT
// Returns the JMM function table for a supported interface version, or
// NULL for unknown versions or when management support is compiled out.
void* Management::get_jmm_interface(int version) {
#if INCLUDE_MANAGEMENT
  if (version == JMM_VERSION_1_0) {
    return (void*) &jmm_interface;
  }
#endif // INCLUDE_MANAGEMENT
  return NULL;
}
C:\hotspot-69087d08d473\src\share\vm/services/management.hpp
#ifndef SHARE_VM_SERVICES_MANAGEMENT_HPP
#define SHARE_VM_SERVICES_MANAGEMENT_HPP
#include "memory/allocation.hpp"
#include "runtime/handles.hpp"
#include "runtime/timer.hpp"
#include "services/jmm.h"
class OopClosure;
class ThreadSnapshot;
// Static facade for the VM's monitoring-and-management support: holds the
// resolved management classes, VM startup/init timestamps, and the JMM
// interface lookup.  All members are static (AllStatic).
class Management : public AllStatic {
private:
  // Perf counters for VM creation/initialization milestones.
  static PerfVariable*      _begin_vm_creation_time;
  static PerfVariable*      _end_vm_creation_time;
  static PerfVariable*      _vm_init_done_time;
  static jmmOptionalSupport _optional_support;
  static TimeStamp          _stamp; // Timestamp since vm init done time
  // Lazily resolved Klass* for management-related Java classes.
  static Klass*             _sensor_klass;
  static Klass*             _threadInfo_klass;
  static Klass*             _memoryUsage_klass;
  static Klass*             _memoryPoolMXBean_klass;
  static Klass*             _memoryManagerMXBean_klass;
  static Klass*             _garbageCollectorMXBean_klass;
  static Klass*             _managementFactory_klass;
  static Klass*             _garbageCollectorImpl_klass;
  static Klass*             _diagnosticCommandImpl_klass;
  static Klass*             _managementFactoryHelper_klass;
  static Klass*             _gcInfo_klass;
  // Resolves, and if necessary initializes, a class by symbol.
  static Klass* load_and_initialize_klass(Symbol* sh, TRAPS);
public:
  static void init();
  static void initialize(TRAPS);
  static jlong ticks_to_ms(jlong ticks) NOT_MANAGEMENT_RETURN_(0L);
  static jlong timestamp() NOT_MANAGEMENT_RETURN_(0L);
  static void  oops_do(OopClosure* f) NOT_MANAGEMENT_RETURN;
  static void* get_jmm_interface(int version);
  static void  get_optional_support(jmmOptionalSupport* support);
  static void  get_loaded_classes(JavaThread* cur_thread, GrowableArray<KlassHandle>* klass_handle_array);
  static void  record_vm_startup_time(jlong begin, jlong duration)
      NOT_MANAGEMENT_RETURN;
  // Records the moment VM initialization completed and starts _stamp.
  static void  record_vm_init_completed() {
    // Initialize the timestamp to get the current time
    _vm_init_done_time->set_value(os::javaTimeMillis());
    // Update the timestamp to the vm init done time
    _stamp.update();
  }
  static jlong begin_vm_creation_time() {
    return _begin_vm_creation_time->get_value();
  }
  static jlong vm_init_done_time() {
    return _vm_init_done_time->get_value();
  }
  // Accessors for the lazily resolved management classes.  The
  // NOT_MANAGEMENT_RETURN_ variants collapse to NULL-returning stubs when
  // management support is compiled out.
  static Klass* java_lang_management_ThreadInfo_klass(TRAPS);
  static Klass* java_lang_management_MemoryUsage_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* java_lang_management_MemoryPoolMXBean_klass(TRAPS);
  static Klass* java_lang_management_MemoryManagerMXBean_klass(TRAPS);
  static Klass* java_lang_management_GarbageCollectorMXBean_klass(TRAPS);
  static Klass* sun_management_Sensor_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* sun_management_ManagementFactory_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* sun_management_GarbageCollectorImpl_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* com_sun_management_GcInfo_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* sun_management_DiagnosticCommandImpl_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static Klass* sun_management_ManagementFactoryHelper_klass(TRAPS)
      NOT_MANAGEMENT_RETURN_(NULL);
  static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS);
  static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);
};
// Stack-allocated helper that measures VM creation time: start() records
// the begin timestamp, end() reports begin + elapsed duration to
// Management::record_vm_startup_time.
class TraceVmCreationTime : public StackObj {
 private:
  TimeStamp _timer;       // elapsed-time source for the duration
  jlong     _begin_time;  // wall-clock start, in javaTimeMillis units
 public:
  TraceVmCreationTime() {}
  ~TraceVmCreationTime() {}
  void start()
  { _timer.update_to(0); _begin_time = os::javaTimeMillis(); }
  // Only call this if initialization completes successfully; it will also
  // set the PerfMemory flag indicating VM initialization is complete
  // (see Management::record_vm_startup_time).
  void end()
  { Management::record_vm_startup_time(_begin_time, _timer.milliseconds()); }
};
#endif // SHARE_VM_SERVICES_MANAGEMENT_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memBaseline.cpp
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
// Descending-size comparator for malloc sites: sites with larger
// allocation totals sort first; equal sizes compare equal.
int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
  if (s1.size() > s2.size()) {
    return -1;
  }
  if (s1.size() < s2.size()) {
    return 1;
  }
  return 0;
}
// Descending-reserved-size comparator for virtual memory allocation
// sites: sites with more reserved memory sort first.
int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
                                const VirtualMemoryAllocationSite& s2) {
  if (s1.reserved() > s2.reserved()) {
    return -1;
  }
  if (s1.reserved() < s2.reserved()) {
    return 1;
  }
  return 0;
}
// Orders malloc sites by their native call stacks.
int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
  return s1.call_stack()->compare(*s2.call_stack());
}
// Orders malloc sites by call stack, breaking ties by memory flag (type).
int compare_malloc_site_and_type(const MallocSite& s1, const MallocSite& s2) {
  const int by_stack = compare_malloc_site(s1, s2);
  return (by_stack != 0) ? by_stack : (int)(s1.flag() - s2.flag());
}
// Orders virtual memory allocation sites by their native call stacks.
int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
                                const VirtualMemoryAllocationSite& s2) {
  return s1.call_stack()->compare(*s2.call_stack());
}
// Walker that snapshots malloc sites at or above the baseline size
// threshold into a list sorted by descending allocation size.
class MallocAllocationSiteWalker : public MallocSiteWalker {
 private:
  SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites;
  size_t         _count;  // number of sites captured
 public:
  MallocAllocationSiteWalker() : _count(0) { }
  inline size_t count() const { return _count; }
  LinkedList<MallocSite>* malloc_sites() {
    return &_malloc_sites;
  }
  bool do_malloc_site(const MallocSite* site) {
    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
      if (_malloc_sites.add(*site) != NULL) {
        _count++;
        return true;
      } else {
        return false;  // OOM
      }
    } else {
      // Sites below the threshold are skipped but the walk continues.
      return true;
    }
  }
};
// Orders reserved memory regions via ReservedMemoryRegion::compare
// (used to keep the region list sorted by base address).
int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}
// Walker that snapshots reserved virtual memory regions at or above the
// baseline size threshold, ordered by base address.
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
 private:
  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
                _virtual_memory_regions;
  size_t        _count;  // number of regions captured
 public:
  VirtualMemoryAllocationWalker() : _count(0) { }
  bool do_allocation_site(const ReservedMemoryRegion* rgn)  {
    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
      if (_virtual_memory_regions.add(*rgn) != NULL) {
        _count ++;
        return true;
      } else {
        return false;  // OOM aborts the walk
      }
    }
    // Regions below the threshold are skipped but the walk continues.
    return true;
  }
  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
    return &_virtual_memory_regions;
  }
};
// Captures summary-level NMT baselines: snapshots of the malloc and
// virtual memory accounting totals.
bool MemBaseline::baseline_summary() {
  MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
  VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
  return true;
}
// Captures detail-level baselines: walks the malloc site table and the
// virtual memory tracker, then aggregates the virtual memory regions by
// allocation site.  Returns false if any walk or aggregation fails (OOM).
bool MemBaseline::baseline_allocation_sites() {
  // Malloc allocation sites
  MallocAllocationSiteWalker malloc_walker;
  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
    return false;
  }
  // Walker collected sites in size order; record that as the current order.
  _malloc_sites.move(malloc_walker.malloc_sites());
  _malloc_sites_order = by_size;
  // Virtual memory allocation sites
  VirtualMemoryAllocationWalker virtual_memory_walker;
  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
    return false;
  }
  // Virtual memory allocations are collected in address order
  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
  // Fold per-region data into per-call-site totals.
  if (!aggregate_virtual_memory_allocation_sites()) {
    return false;
  }
  _virtual_memory_sites_order = by_address;
  return true;
}
// Capture a new baseline. Summary data is always captured; allocation-site
// detail is added when detail tracking is on and summaryOnly is false.
// Returns false only if the summary snapshot fails.
bool MemBaseline::baseline(bool summaryOnly) {
  reset();
  _class_count = InstanceKlass::number_of_instance_classes();
  if (!baseline_summary()) {
    return false;
  }
  _baseline_type = Summary_baselined;
  if (!summaryOnly &&
      MemTracker::tracking_level() == NMT_detail) {
    // Fix: the original ignored this return value, so a failed (OOM) site
    // walk still promoted the baseline to Detail_baselined even though the
    // detail lists were incomplete. Only promote on success; the summary
    // baseline remains valid either way.
    if (baseline_allocation_sites()) {
      _baseline_type = Detail_baselined;
    }
  }
  return true;
}
// Sort/lookup helper for aggregation: orders virtual-memory allocation sites
// by their native call stacks.
int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
const VirtualMemoryAllocationSite& s2) {
return s1.call_stack()->compare(*s2.call_stack());
}
// Fold the per-region list (_virtual_memory_allocations) into per-call-site
// totals in _virtual_memory_sites. Regions sharing a call stack are summed.
// Returns false on allocation failure.
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
VirtualMemoryAllocationSite* site;
while ((rgn = itr.next()) != NULL) {
// Probe with a temporary site keyed by this region's call stack and flag.
VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->flag());
site = allocation_sites.find(tmp);
if (site == NULL) {
LinkedListNode<VirtualMemoryAllocationSite>* node =
allocation_sites.add(tmp);
if (node == NULL) return false;
site = node->data();
}
// Accumulate this region into the (new or existing) site's totals.
site->reserve_memory(rgn->size());
site->commit_memory(rgn->committed_size());
}
// Transfer ownership of the aggregated list into the baseline.
_virtual_memory_sites.move(&allocation_sites);
return true;
}
// Return an iterator over malloc sites in the requested order, re-sorting
// the list in place first if needed. Only valid for a detail baseline.
// by_address is not a supported order for malloc sites.
MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
assert(!_malloc_sites.is_empty(), "Not detail baseline");
switch(order) {
case by_size:
malloc_sites_to_size_order();
break;
case by_site:
malloc_sites_to_allocation_site_order();
break;
case by_site_and_type:
malloc_sites_to_allocation_site_and_type_order();
break;
case by_address:
default:
ShouldNotReachHere();
}
return MallocSiteIterator(_malloc_sites.head());
}
// Return an iterator over virtual memory sites in the requested order,
// re-sorting in place first if needed. Only valid for a detail baseline.
// by_address is not a supported order for virtual memory sites.
VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
assert(!_virtual_memory_sites.is_empty(), "Not detail baseline");
switch(order) {
case by_size:
virtual_memory_sites_to_size_order();
break;
case by_site:
virtual_memory_sites_to_reservation_site_order();
break;
case by_address:
default:
ShouldNotReachHere();
}
return VirtualMemorySiteIterator(_virtual_memory_sites.head());
}
// Re-sort _malloc_sites by size: drain into a SortedLinkedList (which inserts
// in order), then steal the sorted head back so no nodes are copied.
void MemBaseline::malloc_sites_to_size_order() {
if (_malloc_sites_order != by_size) {
SortedLinkedList<MallocSite, compare_malloc_size> tmp;
tmp.move(&_malloc_sites);
// Reclaim the sorted chain; clear tmp's head so its destructor
// does not free the nodes we just took back.
_malloc_sites.set_head(tmp.head());
tmp.set_head(NULL);
_malloc_sites_order = by_size;
}
}
// Re-sort _malloc_sites by call site. A by_site_and_type ordering is also
// grouped by site, so it is accepted as-is.
void MemBaseline::malloc_sites_to_allocation_site_order() {
if (_malloc_sites_order != by_site && _malloc_sites_order != by_site_and_type) {
SortedLinkedList<MallocSite, compare_malloc_site> tmp;
tmp.move(&_malloc_sites);
// Steal the sorted chain back; clear tmp's head to avoid double-free.
_malloc_sites.set_head(tmp.head());
tmp.set_head(NULL);
_malloc_sites_order = by_site;
}
}
// Re-sort _malloc_sites by call site and memory type.
void MemBaseline::malloc_sites_to_allocation_site_and_type_order() {
if (_malloc_sites_order != by_site_and_type) {
SortedLinkedList<MallocSite, compare_malloc_site_and_type> tmp;
tmp.move(&_malloc_sites);
// Steal the sorted chain back; clear tmp's head to avoid double-free.
_malloc_sites.set_head(tmp.head());
tmp.set_head(NULL);
_malloc_sites_order = by_site_and_type;
}
}
// Re-sort _virtual_memory_sites by reserved size.
void MemBaseline::virtual_memory_sites_to_size_order() {
if (_virtual_memory_sites_order != by_size) {
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp;
tmp.move(&_virtual_memory_sites);
// Steal the sorted chain back; clear tmp's head to avoid double-free.
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
_virtual_memory_sites_order = by_size;
}
}
void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
if (_virtual_memory_sites_order != by_size) {
SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp;
tmp.move(&_virtual_memory_sites);
_virtual_memory_sites.set_head(tmp.head());
tmp.set_head(NULL);
_virtual_memory_sites_order = by_size;
}
}
C:\hotspot-69087d08d473\src\share\vm/services/memBaseline.hpp
#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "services/mallocSiteTable.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/linkedlist.hpp"
typedef LinkedListIterator<MallocSite> MallocSiteIterator;
typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
// A point-in-time capture of native memory usage as seen by Native Memory
// Tracking (NMT). Always carries summary snapshots; a detail baseline
// additionally carries per-call-site malloc and virtual-memory data.
class MemBaseline VALUE_OBJ_CLASS_SPEC {
 public:
  enum BaselineThreshold {
    SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined.
  };

  enum BaselineType {
    Not_baselined,     // no data captured yet
    Summary_baselined, // summary snapshots only
    Detail_baselined   // summary plus allocation-site detail
  };

  enum SortingOrder {
    by_address,      // by memory address
    by_size,         // by memory size
    by_site,         // by call site where the memory is allocated from
    by_site_and_type // by call site and memory type
  };

 private:
  // Summary information.
  MallocMemorySnapshot  _malloc_memory_snapshot;
  VirtualMemorySnapshot _virtual_memory_snapshot;
  size_t                _class_count;

  // Detail information (populated only for a detail baseline).
  LinkedListImpl<MallocSite>                  _malloc_sites;
  LinkedListImpl<ReservedMemoryRegion>        _virtual_memory_allocations;
  LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites;

  // Current sorting order of the detail lists.
  SortingOrder _malloc_sites_order;
  SortingOrder _virtual_memory_sites_order;

  BaselineType _baseline_type;

 public:
  // Fix: initializers now follow declaration order (avoids -Wreorder), and
  // the sorting orders get a deterministic initial value instead of being
  // left indeterminate until the first detail baseline.
  MemBaseline():
    _class_count(0),
    _malloc_sites_order(by_address),
    _virtual_memory_sites_order(by_address),
    _baseline_type(Not_baselined) {
  }

  // Capture a new baseline; returns false if the summary snapshot fails.
  bool baseline(bool summaryOnly = true);

  BaselineType baseline_type() const { return _baseline_type; }

  MallocMemorySnapshot* malloc_memory_snapshot() {
    return &_malloc_memory_snapshot;
  }

  VirtualMemorySnapshot* virtual_memory_snapshot() {
    return &_virtual_memory_snapshot;
  }

  // Iterators over detail data; only valid for a detail baseline.
  MallocSiteIterator malloc_sites(SortingOrder order);
  VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);

  VirtualMemoryAllocationIterator virtual_memory_allocations() {
    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
  }

  // Total reserved memory: all malloc'd bytes plus reserved virtual memory.
  size_t total_reserved_memory() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    size_t amount = _malloc_memory_snapshot.total() +
           _virtual_memory_snapshot.total_reserved();
    return amount;
  }

  // Total committed memory: all malloc'd bytes plus committed virtual memory.
  size_t total_committed_memory() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    size_t amount = _malloc_memory_snapshot.total() +
           _virtual_memory_snapshot.total_committed();
    return amount;
  }

  size_t total_arena_memory() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    return _malloc_memory_snapshot.total_arena();
  }

  // Bytes consumed by malloc tracking bookkeeping itself.
  size_t malloc_tracking_overhead() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    // const_cast because malloc_overhead() is a non-const accessor.
    MemBaseline* bl = const_cast<MemBaseline*>(this);
    return bl->_malloc_memory_snapshot.malloc_overhead()->size();
  }

  MallocMemory* malloc_memory(MEMFLAGS flag) {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    return _malloc_memory_snapshot.by_type(flag);
  }

  VirtualMemory* virtual_memory(MEMFLAGS flag) {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    return _virtual_memory_snapshot.by_type(flag);
  }

  size_t class_count() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    return _class_count;
  }

  size_t thread_count() const {
    assert(baseline_type() != Not_baselined, "Not yet baselined");
    return _malloc_memory_snapshot.thread_count();
  }

  // Discard all captured data and return to the Not_baselined state.
  void reset() {
    _baseline_type = Not_baselined;
    _class_count = 0;
    _malloc_sites.clear();
    _virtual_memory_sites.clear();
    _virtual_memory_allocations.clear();
  }

 private:
  // Capture summary snapshots.
  bool baseline_summary();
  // Capture allocation-site detail (detail baseline only).
  bool baseline_allocation_sites();
  // Aggregate virtual memory regions by reservation call site.
  bool aggregate_virtual_memory_allocation_sites();

  // In-place re-sorting helpers for the detail lists.
  void malloc_sites_to_size_order();
  void malloc_sites_to_allocation_site_order();
  void malloc_sites_to_allocation_site_and_type_order();
  void virtual_memory_sites_to_size_order();
  void virtual_memory_sites_to_reservation_site_order();
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memoryManager.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryService.hpp"
#include "services/gcNotifier.hpp"
#include "utilities/dtrace.hpp"
#ifndef USDT2
HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__begin, char*, int, char*, int,
size_t, size_t, size_t, size_t);
HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__end, char*, int, char*, int,
size_t, size_t, size_t, size_t);
#endif /* !USDT2 */
// Constructor: no pools registered, Java-side manager object not yet created.
MemoryManager::MemoryManager() {
_num_pools = 0;
// Clears the lazily-created Java-side manager object. NOTE(review): the
// (void)const_cast wrapping presumably exists to silence compiler warnings
// about assigning through the volatile field -- confirm against the
// toolchain's warning set before simplifying.
(void)const_cast<instanceOop&>(_memory_mgr_obj = instanceOop(NULL));
}
// Register a pool with this manager. Returns the slot index the pool was
// (or would have been) stored at; if the fixed-size table is already full
// the pool is not stored, but the manager back-link is still recorded.
int MemoryManager::add_pool(MemoryPool* pool) {
  const int index = _num_pools;
  assert(index < MemoryManager::max_num_pools, "_num_pools exceeds the max");
  if (index < MemoryManager::max_num_pools) {
    _pools[index] = pool;
    _num_pools = index + 1;
  }
  // Record the reverse pool -> manager association unconditionally.
  pool->add_manager(this);
  return index;
}
// Factory methods: each returns a freshly heap-allocated concrete manager.
// The derived-to-base conversions are implicit, so the original's casts
// were redundant and have been dropped.
MemoryManager* MemoryManager::get_code_cache_memory_manager() {
  return new CodeCacheMemoryManager();
}
MemoryManager* MemoryManager::get_metaspace_memory_manager() {
  return new MetaspaceMemoryManager();
}
GCMemoryManager* MemoryManager::get_copy_memory_manager() {
  return new CopyMemoryManager();
}
GCMemoryManager* MemoryManager::get_msc_memory_manager() {
  return new MSCMemoryManager();
}
GCMemoryManager* MemoryManager::get_parnew_memory_manager() {
  return new ParNewMemoryManager();
}
GCMemoryManager* MemoryManager::get_cms_memory_manager() {
  return new CMSMemoryManager();
}
GCMemoryManager* MemoryManager::get_psScavenge_memory_manager() {
  return new PSScavengeMemoryManager();
}
GCMemoryManager* MemoryManager::get_psMarkSweep_memory_manager() {
  return new PSMarkSweepMemoryManager();
}
GCMemoryManager* MemoryManager::get_g1YoungGen_memory_manager() {
  return new G1YoungGenMemoryManager();
}
GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
  return new G1OldGenMemoryManager();
}
// Return the Java-side MemoryManagerMXBean instance for this manager,
// creating it on first use via sun.management.ManagementFactory. Uses
// acquire/release publication plus Management_lock so concurrent callers
// agree on a single instance.
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// First (unlocked) read; load-acquire pairs with the release-store below.
instanceOop mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
if (mgr_obj == NULL) {
// The Java call below may throw; CHECK_0 propagates with a NULL result.
Klass* k = Management::sun_management_ManagementFactory_klass(CHECK_0);
instanceKlassHandle ik(THREAD, k);
Handle mgr_name = java_lang_String::create_from_str(name(), CHECK_0);
JavaValue result(T_OBJECT);
JavaCallArguments args;
args.push_oop(mgr_name); // Argument 1
Symbol* method_name = NULL;
Symbol* signature = NULL;
if (is_gc_memory_manager()) {
method_name = vmSymbols::createGarbageCollector_name();
signature = vmSymbols::createGarbageCollector_signature();
args.push_oop(Handle()); // Argument 2 (for future extension)
} else {
method_name = vmSymbols::createMemoryManager_name();
signature = vmSymbols::createMemoryManager_signature();
}
JavaCalls::call_static(&result,
ik,
method_name,
signature,
&args,
CHECK_0);
instanceOop m = (instanceOop) result.get_jobject();
instanceHandle mgr(THREAD, m);
{
// Re-check under the lock: another thread may have installed an
// instance while we were calling into Java. If so, our new object
// is dropped and the winner's instance is returned.
MutexLocker ml(Management_lock);
mgr_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_mgr_obj);
if (mgr_obj != NULL) {
return mgr_obj;
}
mgr_obj = mgr();
// Publish with release semantics so readers see a fully built object.
OrderAccess::release_store_ptr(&_memory_mgr_obj, mgr_obj);
}
}
return mgr_obj;
}
// GC support: visit the lazily-created Java-side manager object so it is
// kept alive and updated if it moves.
void MemoryManager::oops_do(OopClosure* f) {
f->do_oop((oop*) &_memory_mgr_obj);
}
// Allocate before/after-GC usage arrays sized to the number of memory pools
// and zero all statistics.
GCStatInfo::GCStatInfo(int num_pools) {
_before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
_after_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
_usage_array_size = num_pools;
clear();
}
// Release the usage arrays.
// Fix: free with the element type used at allocation. The constructor
// allocates with NEW_C_HEAP_ARRAY(MemoryUsage, ...); the original freed with
// FREE_C_HEAP_ARRAY(MemoryUsage*, ...), an inconsistent element type.
GCStatInfo::~GCStatInfo() {
  FREE_C_HEAP_ARRAY(MemoryUsage, _before_gc_usage_array, mtInternal);
  FREE_C_HEAP_ARRAY(MemoryUsage, _after_gc_usage_array, mtInternal);
}
// Record the usage snapshot of one pool on the requested side (before or
// after) of the collection.
void GCStatInfo::set_gc_usage(int pool_index, MemoryUsage usage, bool before_gc) {
  MemoryUsage* gc_usage_array =
      before_gc ? _before_gc_usage_array : _after_gc_usage_array;
  gc_usage_array[pool_index] = usage;
}
void GCStatInfo::clear() {
_index = 0;
_start_time = 0L;
_end_time = 0L;
size_t len = _usage_array_size * sizeof(MemoryUsage);
memset(_before_gc_usage_array, 0, len);
memset(_after_gc_usage_array, 0, len);
}
// Constructor: stat objects are created later by initialize_gc_stat_info(),
// once the number of memory pools is known.
GCMemoryManager::GCMemoryManager() : MemoryManager() {
_num_collections = 0;
_last_gc_stat = NULL;
// Leaf-ranked lock guarding the last-GC statistics swap/copy.
_last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true);
_current_gc_stat = NULL;
_num_gc_threads = 1;
_notification_enabled = false;
}
// Destructor: releases the stat objects and the lock created in the
// constructor / initialize_gc_stat_info(). delete on NULL is a no-op, so
// this is safe even if initialize_gc_stat_info() was never called.
GCMemoryManager::~GCMemoryManager() {
delete _last_gc_stat;
delete _last_gc_lock;
delete _current_gc_stat;
}
// Register a pool; by default every collection by this manager affects it.
void GCMemoryManager::add_pool(MemoryPool* pool) {
add_pool(pool, true);
}
// Register a pool and record whether every collection affects it; the flag
// is consulted in gc_end() when updating last-collection usage.
void GCMemoryManager::add_pool(MemoryPool* pool, bool always_affected_by_gc) {
int index = MemoryManager::add_pool(pool);
_pool_always_affected_by_gc[index] = always_affected_by_gc;
}
// Allocate the two GC statistics buffers (last completed and in-progress),
// sized to the global pool count. Must run after pools are registered.
void GCMemoryManager::initialize_gc_stat_info() {
assert(MemoryService::num_memory_pools() > 0, "should have one or more memory pools");
_last_gc_stat = new(ResourceObj::C_HEAP, mtGC) GCStatInfo(MemoryService::num_memory_pools());
_current_gc_stat = new(ResourceObj::C_HEAP, mtGC) GCStatInfo(MemoryService::num_memory_pools());
}
// Called at the start of a collection. Flags let callers opt out of pieces
// of bookkeeping (e.g. nested or partial collection phases).
void GCMemoryManager::gc_begin(bool recordGCBeginTime, bool recordPreGCUsage,
bool recordAccumulatedGCTime) {
assert(_last_gc_stat != NULL && _current_gc_stat != NULL, "Just checking");
if (recordAccumulatedGCTime) {
_accumulated_timer.start();
}
if (recordGCBeginTime) {
// The in-progress record gets the index this collection will have.
_current_gc_stat->set_index(_num_collections+1);
_current_gc_stat->set_start_time(Management::timestamp());
}
if (recordPreGCUsage) {
// Snapshot every pool's usage before the collection and fire the
// corresponding dtrace probe.
for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
MemoryPool* pool = MemoryService::get_memory_pool(i);
MemoryUsage usage = pool->get_memory_usage();
_current_gc_stat->set_before_gc_usage(i, usage);
#ifndef USDT2
HS_DTRACE_PROBE8(hotspot, mem__pool__gc__begin,
name(), strlen(name()),
pool->name(), strlen(pool->name()),
usage.init_size(), usage.used(),
usage.committed(), usage.max_size());
#else /* USDT2 */
HOTSPOT_MEM_POOL_GC_BEGIN(
(char *) name(), strlen(name()),
(char *) pool->name(), strlen(pool->name()),
usage.init_size(), usage.used(),
usage.committed(), usage.max_size());
#endif /* USDT2 */
}
}
}
// Called at the end of a collection: records post-GC usage, swaps the
// completed stats into _last_gc_stat, and optionally posts a notification.
// Flags mirror gc_begin()'s so partial phases can skip bookkeeping.
void GCMemoryManager::gc_end(bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause,
bool allMemoryPoolsAffected) {
if (recordAccumulatedGCTime) {
_accumulated_timer.stop();
}
if (recordGCEndTime) {
_current_gc_stat->set_end_time(Management::timestamp());
}
if (recordPostGCUsage) {
int i;
// First pass: snapshot ALL pools' post-GC usage into the stat record
// (and fire the dtrace probe).
for (i = 0; i < MemoryService::num_memory_pools(); i++) {
MemoryPool* pool = MemoryService::get_memory_pool(i);
MemoryUsage usage = pool->get_memory_usage();
#ifndef USDT2
HS_DTRACE_PROBE8(hotspot, mem__pool__gc__end,
name(), strlen(name()),
pool->name(), strlen(pool->name()),
usage.init_size(), usage.used(),
usage.committed(), usage.max_size());
#else /* USDT2 */
HOTSPOT_MEM_POOL_GC_END(
(char *) name(), strlen(name()),
(char *) pool->name(), strlen(pool->name()),
usage.init_size(), usage.used(),
usage.committed(), usage.max_size());
#endif /* USDT2 */
_current_gc_stat->set_after_gc_usage(i, usage);
}
// Second pass: only the pools managed by THIS manager get their
// last-collection usage updated and low-memory detection run, and only
// if this collection actually affected them.
for (i = 0; i < num_memory_pools(); i++) {
MemoryPool* pool = get_memory_pool(i);
MemoryUsage usage = pool->get_memory_usage();
if (allMemoryPoolsAffected || pool_always_affected_by_gc(i)) {
pool->set_last_collection_usage(usage);
LowMemoryDetector::detect_after_gc_memory(pool);
}
}
}
if (countCollection) {
_num_collections++;
// The only time we care about saving gc stats is now.
{
// Swap current and last records under the lock so readers via
// get_last_gc_stat() never see a half-filled record.
MutexLockerEx ml(_last_gc_lock, Mutex::_no_safepoint_check_flag);
GCStatInfo *tmp = _last_gc_stat;
_last_gc_stat = _current_gc_stat;
_current_gc_stat = tmp;
// Reset the recycled record for the next collection.
_current_gc_stat->clear();
}
if (is_notification_enabled()) {
bool isMajorGC = this == MemoryService::get_major_gc_manager();
GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
GCCause::to_string(cause));
}
}
}
// Copy the last completed collection's statistics into dest and return its
// collection index (0 means no collection has completed yet, and dest is
// left untouched in that case).
size_t GCMemoryManager::get_last_gc_stat(GCStatInfo* dest) {
MutexLockerEx ml(_last_gc_lock, Mutex::_no_safepoint_check_flag);
if (_last_gc_stat->gc_index() != 0) {
dest->set_index(_last_gc_stat->gc_index());
dest->set_start_time(_last_gc_stat->start_time());
dest->set_end_time(_last_gc_stat->end_time());
assert(dest->usage_array_size() == _last_gc_stat->usage_array_size(),
"Must have same array size");
size_t len = dest->usage_array_size() * sizeof(MemoryUsage);
memcpy(dest->before_gc_usage_array(), _last_gc_stat->before_gc_usage_array(), len);
memcpy(dest->after_gc_usage_array(), _last_gc_stat->after_gc_usage_array(), len);
}
return _last_gc_stat->gc_index();
}
C:\hotspot-69087d08d473\src\share\vm/services/memoryManager.hpp
#ifndef SHARE_VM_SERVICES_MEMORYMANAGER_HPP
#define SHARE_VM_SERVICES_MEMORYMANAGER_HPP
#include "memory/allocation.hpp"
#include "runtime/timer.hpp"
#include "services/memoryUsage.hpp"
class MemoryPool;
class GCMemoryManager;
class OopClosure;
// VM-side representation of a java.lang.management MemoryManagerMXBean.
// Tracks the memory pools it manages and lazily creates the corresponding
// Java-side object on first request.
class MemoryManager : public CHeapObj<mtInternal> {
protected:
enum {
max_num_pools = 10
};
private:
// Fixed-size table of managed pools; see add_pool().
MemoryPool* _pools[max_num_pools];
int _num_pools;
protected:
// Lazily-created Java-side manager object; published with release/acquire
// semantics in get_memory_manager_instance().
volatile instanceOop _memory_mgr_obj;
public:
enum Name {
Abstract,
CodeCache,
Metaspace,
Copy,
MarkSweepCompact,
ParNew,
ConcurrentMarkSweep,
PSScavenge,
PSMarkSweep,
G1YoungGen,
G1OldGen
};
MemoryManager();
int num_memory_pools() const { return _num_pools; }
MemoryPool* get_memory_pool(int index) {
assert(index >= 0 && index < _num_pools, "Invalid index");
return _pools[index];
}
// Registers a pool; returns the slot index it was stored at.
int add_pool(MemoryPool* pool);
bool is_manager(instanceHandle mh) { return mh() == _memory_mgr_obj; }
// Returns (creating on first use) the Java-side manager object.
virtual instanceOop get_memory_manager_instance(TRAPS);
virtual MemoryManager::Name kind() { return MemoryManager::Abstract; }
virtual bool is_gc_memory_manager() { return false; }
virtual const char* name() = 0;
// GC support: visit the Java-side manager object.
void oops_do(OopClosure* f);
// Factories for the concrete manager kinds.
static MemoryManager* get_code_cache_memory_manager();
static MemoryManager* get_metaspace_memory_manager();
static GCMemoryManager* get_copy_memory_manager();
static GCMemoryManager* get_msc_memory_manager();
static GCMemoryManager* get_parnew_memory_manager();
static GCMemoryManager* get_cms_memory_manager();
static GCMemoryManager* get_psScavenge_memory_manager();
static GCMemoryManager* get_psMarkSweep_memory_manager();
static GCMemoryManager* get_g1YoungGen_memory_manager();
static GCMemoryManager* get_g1OldGen_memory_manager();
};
// Non-GC manager for the code cache.
class CodeCacheMemoryManager : public MemoryManager {
public:
  CodeCacheMemoryManager() : MemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::CodeCache; }
  const char* name() { return "CodeCacheManager"; }
};
// Non-GC manager for metaspace.
class MetaspaceMemoryManager : public MemoryManager {
public:
  MetaspaceMemoryManager() : MemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::Metaspace; }
  const char* name() { return "Metaspace Manager"; }
};
// Statistics for one garbage collection: its index, start/end timestamps,
// and per-pool memory usage snapshots taken before and after the collection.
class GCStatInfo : public ResourceObj {
private:
size_t _index;
jlong _start_time;
jlong _end_time;
// Per-pool usage snapshots; arrays of length _usage_array_size.
MemoryUsage* _before_gc_usage_array;
MemoryUsage* _after_gc_usage_array;
int _usage_array_size;
// Shared implementation behind set_before_gc_usage/set_after_gc_usage.
void set_gc_usage(int pool_index, MemoryUsage, bool before_gc);
public:
GCStatInfo(int num_pools);
~GCStatInfo();
size_t gc_index() { return _index; }
jlong start_time() { return _start_time; }
jlong end_time() { return _end_time; }
int usage_array_size() { return _usage_array_size; }
MemoryUsage before_gc_usage_for_pool(int pool_index) {
assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
return _before_gc_usage_array[pool_index];
}
MemoryUsage after_gc_usage_for_pool(int pool_index) {
assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
return _after_gc_usage_array[pool_index];
}
MemoryUsage* before_gc_usage_array() { return _before_gc_usage_array; }
MemoryUsage* after_gc_usage_array() { return _after_gc_usage_array; }
void set_index(size_t index) { _index = index; }
void set_start_time(jlong time) { _start_time = time; }
void set_end_time(jlong time) { _end_time = time; }
void set_before_gc_usage(int pool_index, MemoryUsage usage) {
assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
set_gc_usage(pool_index, usage, true /* before gc */);
}
void set_after_gc_usage(int pool_index, MemoryUsage usage) {
assert(pool_index >= 0 && pool_index < _usage_array_size, "Range checking");
set_gc_usage(pool_index, usage, false /* after gc */);
}
// Zeroes index, timestamps, and both usage arrays.
void clear();
};
// Base class for garbage-collector managers (GarbageCollectorMXBean).
// Adds collection counting, accumulated GC time, per-collection statistics,
// and optional GC notifications on top of MemoryManager.
class GCMemoryManager : public MemoryManager {
private:
size_t _num_collections;
elapsedTimer _accumulated_timer;
elapsedTimer _gc_timer; // for measuring every GC duration
// Stats of the last completed collection; guarded by _last_gc_lock.
GCStatInfo* _last_gc_stat;
Mutex* _last_gc_lock;
// Stats of the collection currently in progress.
GCStatInfo* _current_gc_stat;
int _num_gc_threads;
volatile bool _notification_enabled;
// Per-slot flag: does every collection by this manager affect the pool?
bool _pool_always_affected_by_gc[MemoryManager::max_num_pools];
public:
GCMemoryManager();
~GCMemoryManager();
void add_pool(MemoryPool* pool);
void add_pool(MemoryPool* pool, bool always_affected_by_gc);
bool pool_always_affected_by_gc(int index) {
assert(index >= 0 && index < num_memory_pools(), "Invalid index");
return _pool_always_affected_by_gc[index];
}
// Allocates _last_gc_stat/_current_gc_stat; call after pools exist.
void initialize_gc_stat_info();
bool is_gc_memory_manager() { return true; }
jlong gc_time_ms() { return _accumulated_timer.milliseconds(); }
size_t gc_count() { return _num_collections; }
int num_gc_threads() { return _num_gc_threads; }
void set_num_gc_threads(int count) { _num_gc_threads = count; }
// Bracketing hooks invoked by the collectors around each collection.
void gc_begin(bool recordGCBeginTime, bool recordPreGCUsage,
bool recordAccumulatedGCTime);
void gc_end(bool recordPostGCUsage, bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection, GCCause::Cause cause,
bool allMemoryPoolsAffected);
void reset_gc_stat() { _num_collections = 0; _accumulated_timer.reset(); }
// Copies the last completed collection's stats; returns its index (0 = none).
size_t get_last_gc_stat(GCStatInfo* dest);
void set_notification_enabled(bool enabled) { _notification_enabled = enabled; }
bool is_notification_enabled() { return _notification_enabled; }
virtual MemoryManager::Name kind() = 0;
};
// Concrete GC memory managers: each identifies one collector by kind() and
// its MXBean name(). No state beyond the GCMemoryManager base.
class CopyMemoryManager : public GCMemoryManager {
public:
  CopyMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::Copy; }
  const char* name() { return "Copy"; }
};
class MSCMemoryManager : public GCMemoryManager {
public:
  MSCMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::MarkSweepCompact; }
  const char* name() { return "MarkSweepCompact"; }
};
class ParNewMemoryManager : public GCMemoryManager {
public:
  ParNewMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::ParNew; }
  const char* name() { return "ParNew"; }
};
class CMSMemoryManager : public GCMemoryManager {
public:
  CMSMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::ConcurrentMarkSweep; }
  const char* name() { return "ConcurrentMarkSweep"; }
};
class PSScavengeMemoryManager : public GCMemoryManager {
public:
  PSScavengeMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::PSScavenge; }
  const char* name() { return "PS Scavenge"; }
};
class PSMarkSweepMemoryManager : public GCMemoryManager {
public:
  PSMarkSweepMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::PSMarkSweep; }
  const char* name() { return "PS MarkSweep"; }
};
class G1YoungGenMemoryManager : public GCMemoryManager {
public:
  G1YoungGenMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::G1YoungGen; }
  const char* name() { return "G1 Young Generation"; }
};
class G1OldGenMemoryManager : public GCMemoryManager {
public:
  G1OldGenMemoryManager() : GCMemoryManager() {}
  MemoryManager::Name kind() { return MemoryManager::G1OldGen; }
  const char* name() { return "G1 Old Generation"; }
};
#endif // SHARE_VM_SERVICES_MEMORYMANAGER_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memoryPool.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryPool.hpp"
#include "utilities/macros.hpp"
#include "utilities/globalDefinitions.hpp"
// Constructor: records static pool attributes; threshold support objects
// are created eagerly, sensors and the Java-side object lazily.
MemoryPool::MemoryPool(const char* name,
PoolType type,
size_t init_size,
size_t max_size,
bool support_usage_threshold,
bool support_gc_threshold) {
_name = name;
_initial_size = init_size;
_max_size = max_size;
// Clears the lazily-created Java-side pool object. NOTE(review): the
// (void)const_cast wrapping presumably silences warnings about assigning
// through the volatile field -- confirm before simplifying.
(void)const_cast<instanceOop&>(_memory_pool_obj = instanceOop(NULL));
_available_for_allocation = true;
_num_managers = 0;
_type = type;
// Seed the after-GC usage with a zero-used/zero-committed snapshot.
_after_gc_usage = MemoryUsage(_initial_size, 0, 0, _max_size);
_usage_sensor = NULL;
_gc_usage_sensor = NULL;
// High and low threshold support are enabled together.
_usage_threshold = new ThresholdSupport(support_usage_threshold, support_usage_threshold);
_gc_usage_threshold = new ThresholdSupport(support_gc_threshold, support_gc_threshold);
}
void MemoryPool::add_manager(MemoryManager* mgr) {
assert(_num_managers < MemoryPool::max_num_managers, "_num_managers exceeds the max");
if (_num_managers < MemoryPool::max_num_managers) {
_managers[_num_managers] = mgr;
_num_managers++;
}
}
// Return the Java-side MemoryPoolMXBean instance for this pool, creating it
// on first use via sun.management.ManagementFactory. Uses acquire/release
// publication plus Management_lock so concurrent callers agree on a single
// instance.
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// First (unlocked) read; load-acquire pairs with the release-store below.
instanceOop pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
if (pool_obj == NULL) {
// The Java call below may throw; CHECK_NULL propagates with NULL result.
Klass* k = Management::sun_management_ManagementFactory_klass(CHECK_NULL);
instanceKlassHandle ik(THREAD, k);
Handle pool_name = java_lang_String::create_from_str(_name, CHECK_NULL);
// -1 signals "threshold unsupported" to the Java side; 0 means
// "supported, initially unset".
jlong usage_threshold_value = (_usage_threshold->is_high_threshold_supported() ? 0 : -1L);
jlong gc_usage_threshold_value = (_gc_usage_threshold->is_high_threshold_supported() ? 0 : -1L);
JavaValue result(T_OBJECT);
JavaCallArguments args;
args.push_oop(pool_name); // Argument 1
args.push_int((int) is_heap()); // Argument 2
Symbol* method_name = vmSymbols::createMemoryPool_name();
Symbol* signature = vmSymbols::createMemoryPool_signature();
args.push_long(usage_threshold_value); // Argument 3
args.push_long(gc_usage_threshold_value); // Argument 4
JavaCalls::call_static(&result,
ik,
method_name,
signature,
&args,
CHECK_NULL);
instanceOop p = (instanceOop) result.get_jobject();
instanceHandle pool(THREAD, p);
{
// Re-check under the lock: another thread may have installed an
// instance while we were calling into Java; if so ours is dropped.
MutexLocker ml(Management_lock);
pool_obj = (instanceOop)OrderAccess::load_ptr_acquire(&_memory_pool_obj);
if (pool_obj != NULL) {
return pool_obj;
}
pool_obj = pool();
// Publish with release semantics so readers see a fully built object.
OrderAccess::release_store_ptr(&_memory_pool_obj, pool_obj);
}
}
return pool_obj;
}
// Return the larger of two size_t values.
inline static size_t get_max_value(size_t val1, size_t val2) {
  if (val1 > val2) {
    return val1;
  }
  return val2;
}
// Update the peak usage record with the current usage. Each component
// (used, committed, max) peaks independently, so the stored peak may not
// correspond to any single point-in-time snapshot.
void MemoryPool::record_peak_memory_usage() {
// Caller in JDK is responsible for synchronization -
// acquire the lock for this memory pool before calling VM
MemoryUsage usage = get_memory_usage();
size_t peak_used = get_max_value(usage.used(), _peak_usage.used());
size_t peak_committed = get_max_value(usage.committed(), _peak_usage.committed());
size_t peak_max_size = get_max_value(usage.max_size(), _peak_usage.max_size());
_peak_usage = MemoryUsage(initial_size(), peak_used, peak_committed, peak_max_size);
}
// Create the SensorInfo wrapper for a threshold sensor object and install
// it at *sensor_ptr. Called at most once per sensor slot during pool setup.
static void set_sensor_obj_at(SensorInfo** sensor_ptr, instanceHandle sh) {
  assert(*sensor_ptr == NULL, "Should be called only once");
  SensorInfo* sensor = new SensorInfo();
  sensor->set_sensor(sh());
  // Fix: the original never stored the new sensor, leaking the allocation
  // and leaving the pool's sensor pointer permanently NULL (so oops_do and
  // low-memory detection never saw the sensor).
  *sensor_ptr = sensor;
}
// Install the usage-threshold sensor object (called once during setup).
void MemoryPool::set_usage_sensor_obj(instanceHandle sh) {
set_sensor_obj_at(&_usage_sensor, sh);
}
// Install the collection-usage-threshold sensor object (called once).
void MemoryPool::set_gc_usage_sensor_obj(instanceHandle sh) {
set_sensor_obj_at(&_gc_usage_sensor, sh);
}
// GC support: visit the lazily-created Java-side pool object and any
// installed threshold sensors.
void MemoryPool::oops_do(OopClosure* f) {
f->do_oop((oop*) &_memory_pool_obj);
if (_usage_sensor != NULL) {
_usage_sensor->oops_do(f);
}
if (_gc_usage_sensor != NULL) {
_gc_usage_sensor->oops_do(f);
}
}
// Pool backed by a single ContiguousSpace; initial size is the space's
// capacity at construction time.
ContiguousSpacePool::ContiguousSpacePool(ContiguousSpace* space,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, space->capacity(), max_size,
support_usage_threshold), _space(space) {
}
// Current usage; max is reported as 0 when the pool is unavailable for
// allocation, and committed tracks the space's current capacity.
MemoryUsage ContiguousSpacePool::get_memory_usage() {
size_t maxSize = (available_for_allocation() ? max_size() : 0);
size_t used = used_in_bytes();
size_t committed = _space->capacity();
return MemoryUsage(initial_size(), used, committed, maxSize);
}
// Pool covering a DefNewGeneration's survivor space; initial size is the
// from-space capacity at construction time.
SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->from()->capacity(), max_size,
support_usage_threshold), _gen(gen) {
}
// Current usage; max is reported as 0 when unavailable for allocation.
MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
size_t maxSize = (available_for_allocation() ? max_size() : 0);
size_t used = used_in_bytes();
size_t committed = committed_in_bytes();
return MemoryUsage(initial_size(), used, committed, maxSize);
}
#if INCLUDE_ALL_GCS
// Pool backed by a CMS CompactibleFreeListSpace; initial size is the
// space's capacity at construction time.
CompactibleFreeListSpacePool::CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, space->capacity(), max_size,
support_usage_threshold), _space(space) {
}
// Current usage; max is reported as 0 when unavailable for allocation.
MemoryUsage CompactibleFreeListSpacePool::get_memory_usage() {
size_t maxSize = (available_for_allocation() ? max_size() : 0);
size_t used = used_in_bytes();
size_t committed = _space->capacity();
return MemoryUsage(initial_size(), used, committed, maxSize);
}
#endif // INCLUDE_ALL_GCS
// Pool covering a whole Generation; initial size and max come from the
// generation's current and maximum capacity at construction time.
GenerationPool::GenerationPool(Generation* gen,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->capacity(), gen->max_capacity(),
support_usage_threshold), _gen(gen) {
}
// Current usage; max is reported as 0 when unavailable for allocation.
MemoryUsage GenerationPool::get_memory_usage() {
size_t used = used_in_bytes();
size_t committed = _gen->capacity();
size_t maxSize = (available_for_allocation() ? max_size() : 0);
return MemoryUsage(initial_size(), used, committed, maxSize);
}
// Non-heap pool backed by the code heap; no GC-usage threshold support.
CodeHeapPool::CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold) :
MemoryPool(name, NonHeap, codeHeap->capacity(), codeHeap->max_capacity(),
support_usage_threshold, false), _codeHeap(codeHeap) {
}
// Current usage; max is reported as 0 when unavailable for allocation.
MemoryUsage CodeHeapPool::get_memory_usage() {
size_t used = used_in_bytes();
size_t committed = _codeHeap->capacity();
size_t maxSize = (available_for_allocation() ? max_size() : 0);
return MemoryUsage(initial_size(), used, committed, maxSize);
}
// Non-heap pool for all of metaspace; max is MaxMetaspaceSize when the user
// set it, otherwise "undefined" (effectively unbounded).
MetaspacePool::MetaspacePool() :
MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
MemoryUsage MetaspacePool::get_memory_usage() {
size_t committed = MetaspaceAux::committed_bytes();
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
size_t MetaspacePool::used_in_bytes() {
return MetaspaceAux::used_bytes();
}
size_t MetaspacePool::calculate_max_size() const {
// Only an explicitly set MaxMetaspaceSize caps the pool.
return !FLAG_IS_DEFAULT(MaxMetaspaceSize) ? MaxMetaspaceSize :
MemoryUsage::undefined_size();
}
// Non-heap pool for the compressed class space portion of metaspace;
// capped by CompressedClassSpaceSize.
CompressedKlassSpacePool::CompressedKlassSpacePool() :
MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
size_t CompressedKlassSpacePool::used_in_bytes() {
return MetaspaceAux::used_bytes(Metaspace::ClassType);
}
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
C:\hotspot-69087d08d473\src\share\vm/services/memoryPool.hpp
#ifndef SHARE_VM_SERVICES_MEMORYPOOL_HPP
#define SHARE_VM_SERVICES_MEMORYPOOL_HPP
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/heap.hpp"
#include "memory/space.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#endif // INCLUDE_ALL_GCS
class MemoryManager;
class SensorInfo;
class Generation;
class DefNewGeneration;
class ThresholdSupport;
// VM-side representation of a java.lang.management.MemoryPoolMXBean.
// A pool belongs to one or more MemoryManagers and tracks peak usage,
// post-GC usage and the (GC-)usage thresholds/sensors used by the
// low-memory detector.
class MemoryPool : public CHeapObj<mtInternal> {
friend class MemoryManager;
public:
enum PoolType {
Heap = 1,
NonHeap = 2
};
private:
enum {
max_num_managers = 5
};
// Descriptive attributes, fixed at construction.
const char* _name;
PoolType _type;
size_t _initial_size;
size_t _max_size;
bool _available_for_allocation; // Default is true
// Managers this pool belongs to (filled via add_manager()).
MemoryManager* _managers[max_num_managers];
int _num_managers;
MemoryUsage _peak_usage; // Peak memory usage
MemoryUsage _after_gc_usage; // After GC memory usage
// Threshold bookkeeping and sensors for the low-memory detector.
ThresholdSupport* _usage_threshold;
ThresholdSupport* _gc_usage_threshold;
SensorInfo* _usage_sensor;
SensorInfo* _gc_usage_sensor;
// Lazily-created Java-level MemoryPoolMXBean instance (see
// get_memory_pool_instance()); volatile for lock-free double-checks.
volatile instanceOop _memory_pool_obj;
void add_manager(MemoryManager* mgr);
public:
MemoryPool(const char* name,
PoolType type,
size_t init_size,
size_t max_size,
bool support_usage_threshold,
bool support_gc_threshold);
const char* name() { return _name; }
bool is_heap() { return _type == Heap; }
bool is_non_heap() { return _type == NonHeap; }
size_t initial_size() const { return _initial_size; }
int num_memory_managers() const { return _num_managers; }
// Max size could be changed in the future - virtual so subclasses
// (e.g. MetaspacePool) can compute it.
virtual size_t max_size() const { return _max_size; }
// True if 'pool' is the Java-level object backing this pool.
bool is_pool(instanceHandle pool) { return (pool() == _memory_pool_obj); }
bool available_for_allocation() { return _available_for_allocation; }
// Returns the previous value.
bool set_available_for_allocation(bool value) {
bool prev = _available_for_allocation;
_available_for_allocation = value;
return prev;
}
MemoryManager* get_memory_manager(int index) {
assert(index >= 0 && index < _num_managers, "Invalid index");
return _managers[index];
}
// Records current memory usage if it's a peak usage.
void record_peak_memory_usage();
MemoryUsage get_peak_memory_usage() {
// Caller in JDK is responsible for synchronization -
// acquire the lock for this memory pool before calling VM
record_peak_memory_usage();
return _peak_usage;
}
void reset_peak_memory_usage() {
_peak_usage = get_memory_usage();
}
ThresholdSupport* usage_threshold() { return _usage_threshold; }
ThresholdSupport* gc_usage_threshold() { return _gc_usage_threshold; }
SensorInfo* usage_sensor() { return _usage_sensor; }
SensorInfo* gc_usage_sensor() { return _gc_usage_sensor; }
void set_usage_sensor_obj(instanceHandle s);
void set_gc_usage_sensor_obj(instanceHandle s);
void set_last_collection_usage(MemoryUsage u) { _after_gc_usage = u; }
virtual instanceOop get_memory_pool_instance(TRAPS);
virtual MemoryUsage get_memory_usage() = 0;
virtual size_t used_in_bytes() = 0;
virtual bool is_collected_pool() { return false; }
virtual MemoryUsage get_last_collection_usage() { return _after_gc_usage; }
// GC support - apply 'f' to the oops held by this pool.
void oops_do(OopClosure* f);
};
// Base class for pools whose contents are reclaimed by GC; always
// supports the collection-usage (GC) threshold.
class CollectedMemoryPool : public MemoryPool {
public:
CollectedMemoryPool(const char* name, PoolType type, size_t init_size, size_t max_size, bool support_usage_threshold) :
MemoryPool(name, type, init_size, max_size, support_usage_threshold, true) {};
bool is_collected_pool() { return true; }
};
// Pool backed by a single ContiguousSpace (e.g. DefNew eden).
class ContiguousSpacePool : public CollectedMemoryPool {
private:
ContiguousSpace* _space;
public:
ContiguousSpacePool(ContiguousSpace* space, const char* name, PoolType type, size_t max_size, bool support_usage_threshold);
ContiguousSpace* space() { return _space; }
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return space()->used(); }
};
// Pool for a DefNewGeneration's survivor spaces. Only the from-space is
// counted: the to-space is empty outside of a copying collection.
class SurvivorContiguousSpacePool : public CollectedMemoryPool {
private:
DefNewGeneration* _gen;
public:
SurvivorContiguousSpacePool(DefNewGeneration* gen,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() {
return _gen->from()->used();
}
size_t committed_in_bytes() {
return _gen->from()->capacity();
}
};
#if INCLUDE_ALL_GCS
// Pool backed by a CMS free-list space (the CMS old generation).
// used_stable() returns a value that is stable during a concurrent cycle.
class CompactibleFreeListSpacePool : public CollectedMemoryPool {
private:
CompactibleFreeListSpace* _space;
public:
CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
const char* name,
PoolType type,
size_t max_size,
bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return _space->used_stable(); }
};
#endif // INCLUDE_ALL_GCS
// Pool covering an entire Generation (e.g. the tenured generation).
class GenerationPool : public CollectedMemoryPool {
private:
Generation* _gen;
public:
GenerationPool(Generation* gen, const char* name, PoolType type, bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return _gen->used(); }
};
// Non-heap pool for the code cache. Not a CollectedMemoryPool: nmethods
// are flushed by the sweeper, not by the GC managers.
class CodeHeapPool: public MemoryPool {
private:
CodeHeap* _codeHeap;
public:
CodeHeapPool(CodeHeap* codeHeap, const char* name, bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return _codeHeap->allocated_capacity(); }
};
// Non-heap pool covering all of metaspace.
class MetaspacePool : public MemoryPool {
// Max size is MaxMetaspaceSize when set, otherwise "undefined".
size_t calculate_max_size() const;
public:
MetaspacePool();
MemoryUsage get_memory_usage();
size_t used_in_bytes();
};
// Non-heap pool for the compressed class space portion of metaspace;
// only created when UseCompressedClassPointers is on.
class CompressedKlassSpacePool : public MemoryPool {
public:
CompressedKlassSpacePool();
MemoryUsage get_memory_usage();
size_t used_in_bytes();
};
#endif // SHARE_VM_SERVICES_MEMORYPOOL_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memoryService.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/generationSpec.hpp"
#include "memory/heap.hpp"
#include "memory/memRegion.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/javaCalls.hpp"
#include "services/classLoadingService.hpp"
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryService.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "services/g1MemoryPool.hpp"
#include "services/psMemoryPool.hpp"
#endif // INCLUDE_ALL_GCS
// Global registries of every memory pool and memory manager the VM
// creates; allocated eagerly on the C heap at VM startup.
GrowableArray<MemoryPool*>* MemoryService::_pools_list =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
GrowableArray<MemoryManager*>* MemoryService::_managers_list =
new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);
// Set by set_universe_heap() once the collector type is known.
GCMemoryManager* MemoryService::_minor_gc_manager = NULL;
GCMemoryManager* MemoryService::_major_gc_manager = NULL;
MemoryPool* MemoryService::_code_heap_pool = NULL;
MemoryPool* MemoryService::_metaspace_pool = NULL;
MemoryPool* MemoryService::_compressed_class_pool = NULL;
// Thread closure that simply counts the threads it is applied to.
// Used by set_universe_heap() to count the GC worker threads.
class GcThreadCountClosure: public ThreadClosure {
private:
int _count;
public:
GcThreadCountClosure() : _count(0) {};
void do_thread(Thread* thread);
int count() { return _count; }
};
// Each visited thread bumps the running count; the thread itself is
// not inspected.
void GcThreadCountClosure::do_thread(Thread* thread) {
  _count += 1;
}
// Called once the universe heap is created: builds the pools/managers
// matching the collector in use, then records the GC thread count and
// initializes the managers' GC statistics.
void MemoryService::set_universe_heap(CollectedHeap* heap) {
CollectedHeap::Name kind = heap->kind();
switch (kind) {
case CollectedHeap::GenCollectedHeap : {
add_gen_collected_heap_info(GenCollectedHeap::heap());
break;
}
#if INCLUDE_ALL_GCS
case CollectedHeap::ParallelScavengeHeap : {
add_parallel_scavenge_heap_info(ParallelScavengeHeap::heap());
break;
}
case CollectedHeap::G1CollectedHeap : {
add_g1_heap_info(G1CollectedHeap::heap());
break;
}
#endif // INCLUDE_ALL_GCS
default: {
guarantee(false, "Unrecognized kind of heap");
}
}
// Count the GC worker threads so the managers can report them.
GcThreadCountClosure gctcc;
heap->gc_threads_do(&gctcc);
int count = gctcc.count();
if (count > 0) {
_minor_gc_manager->set_num_gc_threads(count);
_major_gc_manager->set_num_gc_threads(count);
}
// Managers were created by the add_*_heap_info() call above.
_minor_gc_manager->initialize_gc_stat_info();
_major_gc_manager->initialize_gc_stat_info();
}
// Set up managers and pools for a two-generation GenCollectedHeap.
// The minor manager is chosen from the young-gen spec, the major manager
// from the collector policy; the young gen's pools are attached to both
// managers, the old gen's pool to the major manager only.
void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
CollectorPolicy* policy = heap->collector_policy();
assert(policy->is_two_generation_policy(), "Only support two generations");
guarantee(heap->n_gens() == 2, "Only support two-generation heap");
TwoGenerationCollectorPolicy* two_gen_policy = policy->as_two_generation_policy();
if (two_gen_policy != NULL) {
GenerationSpec** specs = two_gen_policy->generations();
// specs[0] is the young generation's spec.
Generation::Name kind = specs[0]->name();
switch (kind) {
case Generation::DefNew:
_minor_gc_manager = MemoryManager::get_copy_memory_manager();
break;
#if INCLUDE_ALL_GCS
case Generation::ParNew:
case Generation::ASParNew:
_minor_gc_manager = MemoryManager::get_parnew_memory_manager();
break;
#endif // INCLUDE_ALL_GCS
default:
guarantee(false, "Unrecognized generation spec");
break;
}
if (policy->is_mark_sweep_policy()) {
_major_gc_manager = MemoryManager::get_msc_memory_manager();
#if INCLUDE_ALL_GCS
} else if (policy->is_concurrent_mark_sweep_policy()) {
_major_gc_manager = MemoryManager::get_cms_memory_manager();
#endif // INCLUDE_ALL_GCS
} else {
guarantee(false, "Unknown two-gen policy");
}
} else {
guarantee(false, "Non two-gen policy");
}
_managers_list->append(_minor_gc_manager);
_managers_list->append(_major_gc_manager);
// Young gen is affected by both collectors; old gen only by the major.
add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
}
#if INCLUDE_ALL_GCS
// Set up managers and pools for the Parallel Scavenge collector:
// PS Scavenge (minor) and PS MarkSweep (major).
void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap) {
// Two managers to keep statistics about _minor_gc and _major_gc scavenge
_minor_gc_manager = MemoryManager::get_psScavenge_memory_manager();
_major_gc_manager = MemoryManager::get_psMarkSweep_memory_manager();
_managers_list->append(_minor_gc_manager);
_managers_list->append(_major_gc_manager);
add_psYoung_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
}
// Set up managers and pools for G1: young-gen (minor) and old-gen
// (major/mixed) managers; all G1 pools are attached to both.
void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
assert(UseG1GC, "sanity");
_minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
_major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
_managers_list->append(_minor_gc_manager);
_managers_list->append(_major_gc_manager);
add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
add_g1OldGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
}
#endif // INCLUDE_ALL_GCS
// Wrap 'gen' in a GenerationPool, register it in _pools_list and
// return it.
MemoryPool* MemoryService::add_gen(Generation* gen,
                                   const char* name,
                                   bool is_heap,
                                   bool support_usage_threshold) {
  MemoryPool::PoolType type =
      is_heap ? MemoryPool::Heap : MemoryPool::NonHeap;
  MemoryPool* pool = new GenerationPool(gen, name, type, support_usage_threshold);
  _pools_list->append(pool);
  return pool;
}
// Wrap a contiguous space in a ContiguousSpacePool, register it in
// _pools_list and return it.
MemoryPool* MemoryService::add_space(ContiguousSpace* space,
                                     const char* name,
                                     bool is_heap,
                                     size_t max_size,
                                     bool support_usage_threshold) {
  MemoryPool::PoolType type =
      is_heap ? MemoryPool::Heap : MemoryPool::NonHeap;
  MemoryPool* pool = new ContiguousSpacePool(space, name, type, max_size,
                                             support_usage_threshold);
  _pools_list->append(pool);
  return pool;
}
// Create the survivor-space pool for a DefNew-style young generation,
// register it in _pools_list and return it.
MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen,
                                               const char* name,
                                               bool is_heap,
                                               size_t max_size,
                                               bool support_usage_threshold) {
  MemoryPool::PoolType type =
      is_heap ? MemoryPool::Heap : MemoryPool::NonHeap;
  MemoryPool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size,
                                                     support_usage_threshold);
  _pools_list->append(pool);
  return pool;
}
#if INCLUDE_ALL_GCS
// Wrap a CMS free-list space in a CompactibleFreeListSpacePool, register
// it in _pools_list and return it.
MemoryPool* MemoryService::add_cms_space(CompactibleFreeListSpace* space,
const char* name,
bool is_heap,
size_t max_size,
bool support_usage_threshold) {
MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap);
CompactibleFreeListSpacePool* pool = new CompactibleFreeListSpacePool(space, name, type, max_size, support_usage_threshold);
_pools_list->append(pool);
return (MemoryPool*) pool;
}
#endif // INCLUDE_ALL_GCS
// Create and register the memory pool(s) backing 'gen', then attach each
// new pool to 'major_mgr' (always) and to 'minor_mgr' (when non-NULL).
// Young generations contribute two pools (eden + survivor) and require
// both managers; old generations contribute one pool and only the major
// manager. The previously unused local bindings for the returned pools
// have been dropped - the factory calls are kept for their side effect
// of appending to _pools_list.
void MemoryService::add_generation_memory_pool(Generation* gen,
                                               GCMemoryManager* major_mgr,
                                               GCMemoryManager* minor_mgr) {
  guarantee(gen != NULL, "No generation for memory pool");
  Generation::Name kind = gen->kind();
  // Remember where the new pools start so they can be wired up to the
  // managers after the switch.
  int index = _pools_list->length();
  switch (kind) {
    case Generation::DefNew: {
      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
      DefNewGeneration* young_gen = (DefNewGeneration*) gen;
      add_space(young_gen->eden(),
                "Eden Space",
                true, /* is_heap */
                young_gen->max_eden_size(),
                false /* support_usage_threshold */);
      add_survivor_spaces(young_gen,
                          "Survivor Space",
                          true, /* is_heap */
                          young_gen->max_survivor_size(),
                          false /* support_usage_threshold */);
      break;
    }

#if INCLUDE_ALL_GCS
    case Generation::ParNew:
    case Generation::ASParNew:
    {
      assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
      ParNewGeneration* parnew_gen = (ParNewGeneration*) gen;
      add_space(parnew_gen->eden(),
                "Par Eden Space",
                true /* is_heap */,
                parnew_gen->max_eden_size(),
                false /* support_usage_threshold */);
      add_survivor_spaces(parnew_gen,
                          "Par Survivor Space",
                          true, /* is_heap */
                          parnew_gen->max_survivor_size(),
                          false /* support_usage_threshold */);
      break;
    }
#endif // INCLUDE_ALL_GCS

    case Generation::MarkSweepCompact: {
      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
      add_gen(gen,
              "Tenured Gen",
              true, /* is_heap */
              true /* support_usage_threshold */);
      break;
    }

#if INCLUDE_ALL_GCS
    case Generation::ConcurrentMarkSweep:
    case Generation::ASConcurrentMarkSweep:
    {
      assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
      ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
      add_cms_space(cms->cmsSpace(),
                    "CMS Old Gen",
                    true, /* is_heap */
                    cms->reserved().byte_size(),
                    true /* support_usage_threshold */);
      break;
    }
#endif // INCLUDE_ALL_GCS

    default:
      assert(false, "should not reach here");
      break;
  }

  // Attach every pool created above to the manager(s).
  assert(major_mgr != NULL, "Should have at least one manager");
  for (int i = index; i < _pools_list->length(); i++) {
    MemoryPool* pool = _pools_list->at(i);
    major_mgr->add_pool(pool);
    if (minor_mgr != NULL) {
      minor_mgr->add_pool(pool);
    }
  }
}
#if INCLUDE_ALL_GCS
// Create the PS young-gen pools (eden + survivor) and attach them to
// both GC managers: a full GC also collects the young generation.
void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr) {
assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen,
gen->eden_space(),
"PS Eden Space",
MemoryPool::Heap,
false /* support_usage_threshold */);
SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen,
"PS Survivor Space",
MemoryPool::Heap,
false /* support_usage_threshold */);
major_mgr->add_pool(eden);
major_mgr->add_pool(survivor);
minor_mgr->add_pool(eden);
minor_mgr->add_pool(survivor);
_pools_list->append(eden);
_pools_list->append(survivor);
}
// Create the PS old-gen pool; only the major (full GC) manager owns it.
void MemoryService::add_psOld_memory_pool(PSOldGen* gen, GCMemoryManager* mgr) {
PSGenerationPool* old_gen = new PSGenerationPool(gen,
"PS Old Gen",
MemoryPool::Heap,
true /* support_usage_threshold */);
mgr->add_pool(old_gen);
_pools_list->append(old_gen);
}
// Create the G1 eden and survivor pools; both managers collect the
// young regions, so both get both pools.
void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr) {
assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
G1EdenPool* eden = new G1EdenPool(g1h);
G1SurvivorPool* survivor = new G1SurvivorPool(g1h);
major_mgr->add_pool(eden);
major_mgr->add_pool(survivor);
minor_mgr->add_pool(eden);
minor_mgr->add_pool(survivor);
_pools_list->append(eden);
_pools_list->append(survivor);
}
// Create the G1 old-gen pool. The minor manager also gets it, but
// flagged as not always affected: a young GC only touches old regions
// indirectly (e.g. via promotion).
void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr) {
assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
G1OldGenPool* old_gen = new G1OldGenPool(g1h);
major_mgr->add_pool(old_gen);
minor_mgr->add_pool(old_gen, false /* always_affected_by_gc */);
_pools_list->append(old_gen);
}
#endif // INCLUDE_ALL_GCS
// Create the code cache pool and its (non-GC) manager and register both.
void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {
_code_heap_pool = new CodeHeapPool(heap,
"Code Cache",
true /* support_usage_threshold */);
MemoryManager* mgr = MemoryManager::get_code_cache_memory_manager();
mgr->add_pool(_code_heap_pool);
_pools_list->append(_code_heap_pool);
_managers_list->append(mgr);
}
// Create the metaspace pool(s) under the Metaspace Manager. The
// compressed class space pool exists only with compressed class
// pointers enabled.
void MemoryService::add_metaspace_memory_pools() {
MemoryManager* mgr = MemoryManager::get_metaspace_memory_manager();
_metaspace_pool = new MetaspacePool();
mgr->add_pool(_metaspace_pool);
_pools_list->append(_metaspace_pool);
if (UseCompressedClassPointers) {
_compressed_class_pool = new CompressedKlassSpacePool();
mgr->add_pool(_compressed_class_pool);
_pools_list->append(_compressed_class_pool);
}
_managers_list->append(mgr);
}
// Find the registered manager whose Java-level object is 'mh';
// returns NULL when none matches.
MemoryManager* MemoryService::get_memory_manager(instanceHandle mh) {
  const int num_managers = _managers_list->length();
  for (int i = 0; i < num_managers; i++) {
    MemoryManager* candidate = _managers_list->at(i);
    if (candidate->is_manager(mh)) {
      return candidate;
    }
  }
  return NULL;
}
// Find the registered pool whose Java-level object is 'ph';
// returns NULL when none matches.
MemoryPool* MemoryService::get_memory_pool(instanceHandle ph) {
  const int num_pools = _pools_list->length();
  for (int i = 0; i < num_pools; i++) {
    MemoryPool* candidate = _pools_list->at(i);
    if (candidate->is_pool(ph)) {
      return candidate;
    }
  }
  return NULL;
}
// Refresh the peak usage of every pool, then give the low-memory
// detector a chance to fire sensors.
void MemoryService::track_memory_usage() {
  const int num_pools = _pools_list->length();
  for (int i = 0; i < num_pools; i++) {
    _pools_list->at(i)->record_peak_memory_usage();
  }
  // Detect low memory
  LowMemoryDetector::detect_low_memory();
}
// Refresh one pool's peak usage and, if it has an enabled usage sensor,
// run low-memory detection for just that pool.
void MemoryService::track_memory_pool_usage(MemoryPool* pool) {
  pool->record_peak_memory_usage();
  if (!LowMemoryDetector::is_enabled(pool)) {
    return;
  }
  LowMemoryDetector::detect_low_memory(pool);
}
// Notify the appropriate GC memory manager that a collection is
// starting; optionally snapshot peak usage of all pools first.
void MemoryService::gc_begin(bool fullGC, bool recordGCBeginTime,
                             bool recordAccumulatedGCTime,
                             bool recordPreGCUsage, bool recordPeakUsage) {
  GCMemoryManager* mgr = fullGC ? _major_gc_manager : _minor_gc_manager;
  assert(mgr->is_gc_memory_manager(), "Sanity check");
  mgr->gc_begin(recordGCBeginTime, recordPreGCUsage, recordAccumulatedGCTime);

  if (recordPeakUsage) {
    // Keep a snapshot of peak memory usage for all pools.
    const int num_pools = _pools_list->length();
    for (int i = 0; i < num_pools; i++) {
      _pools_list->at(i)->record_peak_memory_usage();
    }
  }
}
void MemoryService::gc_end(bool fullGC, bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause,
bool allMemoryPoolsAffected) {
GCMemoryManager* mgr;
if (fullGC) {
mgr = (GCMemoryManager*) _major_gc_manager;
} else {
mgr = (GCMemoryManager*) _minor_gc_manager;
}
assert(mgr->is_gc_memory_manager(), "Sanity check");
mgr->gc_end(recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
countCollection, cause, allMemoryPoolsAffected);
}
// Apply 'f' to every oop held by the registered pools and managers
// (their Java-level objects and sensors).
void MemoryService::oops_do(OopClosure* f) {
  for (int p = 0; p < _pools_list->length(); p++) {
    _pools_list->at(p)->oops_do(f);
  }
  for (int m = 0; m < _managers_list->length(); m++) {
    _managers_list->at(m)->oops_do(f);
  }
}
// Set the PrintGC flag from the management interface (under the
// Management_lock) and returns the new value. Also refreshes the
// class-unloading tracing, which follows verbose GC output.
bool MemoryService::set_verbose(bool verbose) {
MutexLocker m(Management_lock);
// verbose will be set to the previous value
bool succeed = CommandLineFlags::boolAtPut((char*)"PrintGC", &verbose, Flag::MANAGEMENT);
assert(succeed, "Setting PrintGC flag fails");
ClassLoadingService::reset_trace_class_unloading();
return verbose;
}
// Allocate a java.lang.management.MemoryUsage object and invoke its
// (long, long, long, long) constructor with the values from 'usage'.
// Returns a handle to the new object; propagates any pending exception
// via the CHECK_NH macros.
Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
Klass* k = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
instanceKlassHandle ik(THREAD, k);
instanceHandle obj = ik->allocate_instance_handle(CHECK_NH);
JavaValue result(T_VOID);
// Receiver plus four jlongs (two argument words each on 32-bit).
JavaCallArguments args(10);
args.push_oop(obj); // receiver
args.push_long(usage.init_size_as_jlong()); // Argument 1
args.push_long(usage.used_as_jlong()); // Argument 2
args.push_long(usage.committed_as_jlong()); // Argument 3
args.push_long(usage.max_size_as_jlong()); // Argument 4
JavaCalls::call_special(&result,
ik,
vmSymbols::object_initializer_name(),
vmSymbols::long_long_long_long_void_signature(),
&args,
CHECK_NH);
return obj;
}
// Convenience constructor: derive whether this is a full GC from the
// generation kind, then run the standard begin-of-GC bookkeeping with
// all recording options enabled.
TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
  switch (kind) {
    case Generation::DefNew:
#if INCLUDE_ALL_GCS
    case Generation::ParNew:
    case Generation::ASParNew:
#endif // INCLUDE_ALL_GCS
      _fullGC = false;
      break;
    case Generation::MarkSweepCompact:
#if INCLUDE_ALL_GCS
    case Generation::ConcurrentMarkSweep:
    case Generation::ASConcurrentMarkSweep:
#endif // INCLUDE_ALL_GCS
      _fullGC = true;
      break;
    default:
      // In product builds the assert below is compiled out; default to a
      // minor collection rather than reading _fullGC uninitialized in
      // the initialize() call that follows.
      _fullGC = false;
      assert(false, "Unrecognized gc generation kind.");
  }
  // Record all information the trace supports.
  initialize(_fullGC, cause, true, true, true, true, true, true, true, true);
}
// Full-control constructor: forwards every recording option straight to
// initialize(), which also performs the gc_begin() notification.
TraceMemoryManagerStats::TraceMemoryManagerStats(bool fullGC,
GCCause::Cause cause,
bool allMemoryPoolsAffected,
bool recordGCBeginTime,
bool recordPreGCUsage,
bool recordPeakUsage,
bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime,
bool countCollection) {
initialize(fullGC, cause, allMemoryPoolsAffected,
recordGCBeginTime, recordPreGCUsage, recordPeakUsage,
recordPostGCUsage, recordAccumulatedGCTime, recordGCEndTime,
countCollection);
}
// Store the recording options for use by the destructor's gc_end() call,
// then notify MemoryService that the collection has begun.
void TraceMemoryManagerStats::initialize(bool fullGC,
GCCause::Cause cause,
bool allMemoryPoolsAffected,
bool recordGCBeginTime,
bool recordPreGCUsage,
bool recordPeakUsage,
bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime,
bool countCollection) {
_fullGC = fullGC;
_allMemoryPoolsAffected = allMemoryPoolsAffected;
_recordGCBeginTime = recordGCBeginTime;
_recordPreGCUsage = recordPreGCUsage;
_recordPeakUsage = recordPeakUsage;
_recordPostGCUsage = recordPostGCUsage;
_recordAccumulatedGCTime = recordAccumulatedGCTime;
_recordGCEndTime = recordGCEndTime;
_countCollection = countCollection;
_cause = cause;
MemoryService::gc_begin(_fullGC, _recordGCBeginTime, _recordAccumulatedGCTime,
_recordPreGCUsage, _recordPeakUsage);
}
// End-of-scope: report the end of the collection with the options
// captured by initialize().
TraceMemoryManagerStats::~TraceMemoryManagerStats() {
MemoryService::gc_end(_fullGC, _recordPostGCUsage, _recordAccumulatedGCTime,
_recordGCEndTime, _countCollection, _cause, _allMemoryPoolsAffected);
}
C:\hotspot-69087d08d473\src\share\vm/services/memoryService.hpp
#ifndef SHARE_VM_SERVICES_MEMORYSERVICE_HPP
#define SHARE_VM_SERVICES_MEMORYSERVICE_HPP
#include "memory/allocation.hpp"
#include "memory/generation.hpp"
#include "runtime/handles.hpp"
#include "services/memoryUsage.hpp"
#include "gc_interface/gcCause.hpp"
class MemoryPool;
class MemoryManager;
class GCMemoryManager;
class CollectedHeap;
class Generation;
class DefNewGeneration;
class PSYoungGen;
class PSOldGen;
class CodeHeap;
class ContiguousSpace;
class CompactibleFreeListSpace;
class GenCollectedHeap;
class ParallelScavengeHeap;
class G1CollectedHeap;
// Static registry of all memory pools and memory managers in the VM,
// plus the GC begin/end notification entry points used by the
// management (JMM) interface.
class MemoryService : public AllStatic {
private:
enum {
init_pools_list_size = 10,
init_managers_list_size = 5
};
// Indexes into a two-generation heap: young = minor, old = major.
enum {
minor = 0,
major = 1,
n_gens = 2
};
// All pools/managers ever created; grow-only.
static GrowableArray<MemoryPool*>* _pools_list;
static GrowableArray<MemoryManager*>* _managers_list;
// Set during set_universe_heap() according to the collector in use.
static GCMemoryManager* _major_gc_manager;
static GCMemoryManager* _minor_gc_manager;
// Well-known non-GC pools, registered individually.
static MemoryPool* _code_heap_pool;
static MemoryPool* _metaspace_pool;
static MemoryPool* _compressed_class_pool;
// Create the pool(s) for 'gen' and link them to the manager(s).
static void add_generation_memory_pool(Generation* gen,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr);
static void add_generation_memory_pool(Generation* gen,
GCMemoryManager* major_mgr) {
add_generation_memory_pool(gen, major_mgr, NULL);
}
static void add_psYoung_memory_pool(PSYoungGen* gen,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr);
static void add_psOld_memory_pool(PSOldGen* gen,
GCMemoryManager* mgr);
static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr);
static void add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
GCMemoryManager* major_mgr,
GCMemoryManager* minor_mgr);
// Factories that create a pool, append it to _pools_list and return it.
static MemoryPool* add_space(ContiguousSpace* space,
const char* name,
bool is_heap,
size_t max_size,
bool support_usage_threshold);
static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
const char* name,
bool is_heap,
size_t max_size,
bool support_usage_threshold);
static MemoryPool* add_gen(Generation* gen,
const char* name,
bool is_heap,
bool support_usage_threshold);
static MemoryPool* add_cms_space(CompactibleFreeListSpace* space,
const char* name,
bool is_heap,
size_t max_size,
bool support_usage_threshold);
// Per-collector setup, dispatched from set_universe_heap().
static void add_gen_collected_heap_info(GenCollectedHeap* heap);
static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
static void add_g1_heap_info(G1CollectedHeap* g1h);
public:
static void set_universe_heap(CollectedHeap* heap);
static void add_code_heap_memory_pool(CodeHeap* heap);
static void add_metaspace_memory_pools();
// Look up the VM-side object backing a Java-level pool/manager handle.
static MemoryPool* get_memory_pool(instanceHandle pool);
static MemoryManager* get_memory_manager(instanceHandle mgr);
static const int num_memory_pools() {
return _pools_list->length();
}
static const int num_memory_managers() {
return _managers_list->length();
}
static MemoryPool* get_memory_pool(int index) {
return _pools_list->at(index);
}
static MemoryManager* get_memory_manager(int index) {
return _managers_list->at(index);
}
// Peak-usage tracking and low-memory detection hooks.
static void track_memory_usage();
static void track_code_cache_memory_usage() {
track_memory_pool_usage(_code_heap_pool);
}
static void track_metaspace_memory_usage() {
track_memory_pool_usage(_metaspace_pool);
}
static void track_compressed_class_memory_usage() {
track_memory_pool_usage(_compressed_class_pool);
}
static void track_memory_pool_usage(MemoryPool* pool);
// GC start/end notifications; see TraceMemoryManagerStats.
static void gc_begin(bool fullGC, bool recordGCBeginTime,
bool recordAccumulatedGCTime,
bool recordPreGCUsage, bool recordPeakUsage);
static void gc_end(bool fullGC, bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime, bool countCollection,
GCCause::Cause cause,
bool allMemoryPoolsAffected);
// GC support - iterate over oops held by pools and managers.
static void oops_do(OopClosure* f);
static bool get_verbose() { return PrintGC; }
static bool set_verbose(bool verbose);
// Create an instance of java/lang/management/MemoryUsage
static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS);
static const GCMemoryManager* get_minor_gc_manager() {
return _minor_gc_manager;
}
static const GCMemoryManager* get_major_gc_manager() {
return _major_gc_manager;
}
};
// RAII helper bracketing a GC: the constructor calls
// MemoryService::gc_begin() and the destructor calls gc_end() with the
// recording options captured at construction.
class TraceMemoryManagerStats : public StackObj {
private:
bool _fullGC;
bool _allMemoryPoolsAffected;
bool _recordGCBeginTime;
bool _recordPreGCUsage;
bool _recordPeakUsage;
bool _recordPostGCUsage;
bool _recordAccumulatedGCTime;
bool _recordGCEndTime;
bool _countCollection;
GCCause::Cause _cause;
public:
TraceMemoryManagerStats() {}
TraceMemoryManagerStats(bool fullGC,
GCCause::Cause cause,
bool allMemoryPoolsAffected = true,
bool recordGCBeginTime = true,
bool recordPreGCUsage = true,
bool recordPeakUsage = true,
bool recordPostGCUsage = true,
bool recordAccumulatedGCTime = true,
bool recordGCEndTime = true,
bool countCollection = true);
// Stores the options and performs the gc_begin() notification.
void initialize(bool fullGC,
GCCause::Cause cause,
bool allMemoryPoolsAffected,
bool recordGCBeginTime,
bool recordPreGCUsage,
bool recordPeakUsage,
bool recordPostGCUsage,
bool recordAccumulatedGCTime,
bool recordGCEndTime,
bool countCollection);
// Derives fullGC from the generation kind; records everything.
TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause);
~TraceMemoryManagerStats();
};
#endif // SHARE_VM_SERVICES_MEMORYSERVICE_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memoryUsage.hpp
#ifndef SHARE_VM_SERVICES_MEMORYUSAGE_HPP
#define SHARE_VM_SERVICES_MEMORYUSAGE_HPP
#include "utilities/globalDefinitions.hpp"
// Value object mirroring java.lang.management.MemoryUsage: an
// (init, used, committed, max) quadruple in bytes. A size of
// (size_t)-1 means "undefined" and is reported to Java as -1.
class MemoryUsage VALUE_OBJ_CLASS_SPEC {
private:
size_t _initSize;
size_t _used;
size_t _committed;
size_t _maxSize;
public:
// Constructors
MemoryUsage(size_t i, size_t u, size_t c, size_t m) :
_initSize(i), _used(u), _committed(c), _maxSize(m) {};
MemoryUsage() :
_initSize(0), _used(0), _committed(0), _maxSize(0) {};
size_t init_size() const { return _initSize; }
size_t used() const { return _used; }
size_t committed() const { return _committed; }
size_t max_size() const { return _maxSize; }
static size_t undefined_size() { return (size_t) -1; }
// Map a size_t to the jlong seen on the Java side: undefined -> -1;
// on 64-bit, clamp to max_jlong so the value never goes negative.
inline static jlong convert_to_jlong(size_t val) {
// In the 64-bit vm, a size_t can overflow a jlong (which is signed).
jlong ret;
if (val == undefined_size()) {
ret = -1L;
} else {
NOT_LP64(ret = val;)
LP64_ONLY(ret = MIN2(val, (size_t)max_jlong);)
}
return ret;
}
jlong init_size_as_jlong() const { return convert_to_jlong(_initSize); }
jlong used_as_jlong() const { return convert_to_jlong(_used); }
jlong committed_as_jlong() const { return convert_to_jlong(_committed); }
jlong max_size_as_jlong() const { return convert_to_jlong(_maxSize); }
};
#endif // SHARE_VM_SERVICES_MEMORYUSAGE_HPP
C:\hotspot-69087d08d473\src\share\vm/services/memReporter.cpp
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "services/mallocTracker.hpp"
#include "services/memReporter.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
// Total reserved bytes for one memory type: all malloc'd bytes plus
// arena bytes plus reserved virtual memory.
size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
  size_t total = malloc->malloc_size();
  total += malloc->arena_size();
  total += vm->reserved();
  return total;
}
// Total committed bytes for one memory type: malloc'd and arena bytes
// are committed by definition; add committed virtual memory.
size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
  size_t total = malloc->malloc_size();
  total += malloc->arena_size();
  total += vm->committed();
  return total;
}
// Print "reserved=X, committed=Y" in the current scale (KB/MB/GB),
// without a trailing newline.
void MemReporterBase::print_total(size_t reserved, size_t committed) const {
const char* scale = current_scale();
output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
}
// Print a "(malloc=...)" clause; includes "type=<name>" when a specific
// memory flag is given and "#<count>" when the allocation count is
// non-zero. No trailing newline.
void MemReporterBase::print_malloc(size_t amount, size_t count, MEMFLAGS flag) const {
const char* scale = current_scale();
outputStream* out = output();
if (flag != mtNone) {
out->print("(malloc=" SIZE_FORMAT "%s type=%s",
amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
} else {
out->print("(malloc=" SIZE_FORMAT "%s",
amount_in_current_scale(amount), scale);
}
if (count > 0) {
out->print(" #" SIZE_FORMAT "", count);
}
out->print(")");
}
// Print an "(mmap: reserved=X, committed=Y)" clause in the current
// scale, without a trailing newline.
void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
const char* scale = current_scale();
output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
}
// Print one indented malloc line (28-space lead-in) followed by a
// newline.
void MemReporterBase::print_malloc_line(size_t amount, size_t count) const {
  outputStream* out = output();
  out->print("%28s", " ");
  print_malloc(amount, count);
  out->print_cr(" ");
}
// Print one indented virtual-memory line (28-space lead-in) followed by
// a newline.
void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
  outputStream* out = output();
  out->print("%28s", " ");
  print_virtual_memory(reserved, committed);
  out->print_cr(" ");
}
// Print one indented "(arena=X #N)" line in the current scale.
void MemReporterBase::print_arena_line(size_t amount, size_t count) const {
const char* scale = current_scale();
output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ",
amount_in_current_scale(amount), scale, count);
}
// Print a "[base - end] <type> <size>" description of one virtual
// memory region, without a trailing newline.
void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
const char* scale = current_scale();
output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
}
// Print the NMT summary: a grand total followed by one per-type summary.
// The unused local 'scale' has been removed - each print helper fetches
// current_scale() itself.
void MemSummaryReporter::report() {
  outputStream* out = output();
  // Malloc'd memory is committed by definition, so it counts toward both
  // the reserved and the committed grand totals.
  size_t total_reserved_amount = _malloc_snapshot->total() +
    _vm_snapshot->total_reserved();
  size_t total_committed_amount = _malloc_snapshot->total() +
    _vm_snapshot->total_committed();

  // Overall total
  out->print_cr("\nNative Memory Tracking:\n");
  out->print("Total: ");
  print_total(total_reserved_amount, total_committed_amount);
  out->print("\n");

  // Summary by memory type
  for (int index = 0; index < mt_number_of_types; index ++) {
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
    // thread stack is reported as part of thread category
    if (flag == mtThreadStack) continue;
    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
    report_summary_of_type(flag, malloc_memory, virtual_memory);
  }
}
// Print the summary block for one memory type: the type's total, then
// malloc / mmap / arena breakdown lines. Types that round to zero in
// the current scale are skipped entirely. Thread stacks are folded into
// mtThread; NMT's own bookkeeping overhead into mtNMT.
void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {
size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
size_t committed_amount = committed_total(malloc_memory, virtual_memory);
if (flag == mtThread) {
// Count thread's native stack in "Thread" category
const VirtualMemory* thread_stack_usage =
(const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
reserved_amount += thread_stack_usage->reserved();
committed_amount += thread_stack_usage->committed();
} else if (flag == mtNMT) {
// Count malloc headers in "NMT" category
reserved_amount += _malloc_snapshot->malloc_overhead()->size();
committed_amount += _malloc_snapshot->malloc_overhead()->size();
}
// Skip types whose footprint rounds to zero in the current scale.
if (amount_in_current_scale(reserved_amount) > 0) {
outputStream* out = output();
const char* scale = current_scale();
out->print("-%26s (", NMTUtil::flag_to_name(flag));
print_total(reserved_amount, committed_amount);
out->print_cr(")");
if (flag == mtClass) {
// report class count
out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count);
} else if (flag == mtThread) {
// report thread count and total stack usage
out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
const VirtualMemory* thread_stack_usage =
_vm_snapshot->by_type(mtThreadStack);
out->print("%27s (stack: ", " ");
print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
out->print_cr(")");
}
// report malloc'd memory
if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
// mtChunk tracks recycled arena chunks; its count is not meaningful
size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
print_malloc_line(malloc_memory->malloc_size(), count);
}
if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
}
if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
}
if (flag == mtNMT &&
amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
}
out->print_cr(" ");
}
}
// Emit the detail section: malloc call sites followed by virtual memory
// allocation sites.
void MemDetailReporter::report_detail() {
  output()->print_cr("Details:\n");
  report_malloc_sites();
  report_virtual_memory_allocation_sites();
}
// Reports malloc call sites, largest first, skipping any site whose size
// rounds down to zero at the current scale.
void MemDetailReporter::report_malloc_sites() {
  MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
  if (malloc_itr.is_empty()) return;

  outputStream* out = output();

  const MallocSite* malloc_site;
  while ((malloc_site = malloc_itr.next()) != NULL) {
    // Too small to show at this scale.
    if (amount_in_current_scale(malloc_site->size()) == 0)
      continue;

    const NativeCallStack* stack = malloc_site->call_stack();
    stack->print_on(out);
    out->print("%29s", " ");
    MEMFLAGS flag = malloc_site->flag();
    assert((flag >= 0 && flag < (int)mt_number_of_types) && flag != mtNone,
      "Must have a valid memory type");
    print_malloc(malloc_site->size(), malloc_site->count(),flag);
    out->print_cr("\n");
  }
}
// Reports virtual memory reservation sites, largest first, skipping sites
// whose reserved amount rounds down to zero at the current scale.
void MemDetailReporter::report_virtual_memory_allocation_sites() {
  VirtualMemorySiteIterator virtual_memory_itr =
    _baseline.virtual_memory_sites(MemBaseline::by_size);

  if (virtual_memory_itr.is_empty()) return;

  outputStream* out = output();
  const VirtualMemoryAllocationSite* virtual_memory_site;

  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
    // Too small to show at this scale.
    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
      continue;

    const NativeCallStack* stack = virtual_memory_site->call_stack();
    stack->print_on(out);
    out->print("%28s (", " ");
    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
    MEMFLAGS flag = virtual_memory_site->flag();
    // Untyped reservations print no type tag.
    if (flag != mtNone) {
      out->print(" Type=%s", NMTUtil::flag_to_name(flag));
    }
    out->print_cr(")\n");
  }
}
void MemDetailReporter::report_virtual_memory_map() {
VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
output()->print_cr("Virtual memory map:");
while ((rgn = itr.next()) != NULL) {
report_virtual_memory_region(rgn);
}
}
// Prints one reserved region (skipped when it vanishes at the current
// scale): the region line, its type, the reserving call stack, and — unless
// the region was committed wholesale at reserve time — each committed
// sub-region with its own stack.
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
  assert(reserved_rgn != NULL, "NULL pointer");

  // Too small to show at this scale.
  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;

  outputStream* out = output();
  const char* scale = current_scale();
  const NativeCallStack* stack = reserved_rgn->call_stack();
  bool all_committed = reserved_rgn->all_committed();
  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
  out->print_cr(" ");
  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
  if (stack->is_empty()) {
    out->print_cr(" ");
  } else {
    out->print_cr(" from");
    stack->print_on(out, 4);
  }

  // Fully committed regions have no separate committed sub-regions to list.
  if (all_committed) return;

  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
  const CommittedMemoryRegion* committed_rgn;
  while ((committed_rgn = itr.next()) != NULL) {
    // Too small to show at this scale.
    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
    stack = committed_rgn->call_stack();
    out->print("\n\t");
    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
    if (stack->is_empty()) {
      out->print_cr(" ");
    } else {
      out->print_cr(" from");
      stack->print_on(out, 12);
    }
  }
}
// Generates the summary diff report: total diff line followed by one diff
// per memory type. mtThreadStack is skipped because diff_summary_of_type()
// folds thread stacks into mtThread.
// Fix: removed the unused local `scale` (current_scale() was fetched and
// never used in this function).
void MemSummaryDiffReporter::report_diff() {
  outputStream* out = output();
  out->print_cr("\nNative Memory Tracking:\n");

  // Overall diff of the two baselines.
  out->print("Total: ");
  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
    _early_baseline.total_committed_memory());

  out->print_cr("\n");

  // Per-type diffs.
  for (int index = 0; index < mt_number_of_types; index ++) {
    MEMFLAGS flag = NMTUtil::index_to_flag(index);
    // Thread stacks are reported as part of mtThread.
    if (flag == mtThreadStack) continue;
    diff_summary_of_type(flag, _early_baseline.malloc_memory(flag),
      _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag),
      _current_baseline.virtual_memory(flag));
  }
}
// Prints "malloc=<amount><unit> [type=<name>] [<+/-diff><unit>] [#<count>
// [<+/-count diff>]]". Deltas are only shown when visible in the current
// scale (size) or non-zero (count).
void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count, MEMFLAGS flags) const {
  const char* scale = current_scale();
  outputStream* out = output();

  out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  if (flags != mtNone) {
    out->print(" type=%s", NMTUtil::flag_to_name(flags));
  }

  long amount_diff = diff_in_current_scale(current_amount, early_amount);
  if (amount_diff != 0) {
    out->print(" %+ld%s", amount_diff, scale);
  }
  if (current_count > 0) {
    out->print(" #" SIZE_FORMAT "", current_count);
    if (current_count != early_count) {
      // NOTE(review): size_t delta is narrowed to int for %+d — assumed to
      // fit in practice; confirm for extreme count changes.
      out->print(" %+d", (int)(current_count - early_count));
    }
  }
}
// Prints "arena=<amount><unit> [<+/-diff>] #<count> [<+/-count diff>]".
// The size delta is only shown when visible in the current scale.
void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
  size_t early_amount, size_t early_count) const {
  const char* scale = current_scale();
  outputStream* out = output();
  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
  if (diff_in_current_scale(current_amount, early_amount) != 0) {
    out->print(" %+ld", diff_in_current_scale(current_amount, early_amount));
  }

  out->print(" #" SIZE_FORMAT "", current_count);
  if (current_count != early_count) {
    // NOTE(review): size_t delta narrowed to int for %+d — assumed to fit.
    out->print(" %+d", (int)(current_count - early_count));
  }
}
// Prints "reserved=<amount><unit> [<+/-diff><unit>], committed=<amount><unit>
// [<+/-diff><unit>]"; each delta is shown only when visible at this scale.
void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
  size_t early_reserved, size_t early_committed) const {
  outputStream* out = output();
  const char* unit = current_scale();

  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), unit);
  const long reserved_delta = diff_in_current_scale(current_reserved, early_reserved);
  if (reserved_delta != 0) {
    out->print(" %+ld%s", reserved_delta, unit);
  }

  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), unit);
  const long committed_delta = diff_in_current_scale(current_committed, early_committed);
  if (committed_delta != 0) {
    out->print(" %+ld%s", committed_delta, unit);
  }
}
// Prints the diff for one memory type, comparing the current baseline
// against the early one. A type is reported when it has a visible amount at
// the current scale or a non-zero reserved diff. Thread stacks are folded
// into mtThread; NMT's own tracking overhead is folded into mtNMT.
// Fixes: the mtClass branch now uses the computed class_count_diff instead
// of recomputing the expression (mirroring the mtThread branch), and the
// two unused locals current/early_tracking_overhead in the mtNMT section
// were removed.
void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc,
  const VirtualMemory* early_vm, const MallocMemory* current_malloc,
  const VirtualMemory* current_vm) const {

  outputStream* out = output();
  const char* scale = current_scale();

  // Total reserved/committed per baseline = malloc'd + mapped memory.
  size_t current_reserved_amount  = reserved_total (current_malloc, current_vm);
  size_t current_committed_amount = committed_total(current_malloc, current_vm);
  size_t early_reserved_amount    = reserved_total(early_malloc, early_vm);
  size_t early_committed_amount   = committed_total(early_malloc, early_vm);

  if (flag == mtThread) {
    // Count thread stacks (tracked under mtThreadStack) as thread memory.
    const VirtualMemory* early_thread_stack_usage =
      _early_baseline.virtual_memory(mtThreadStack);
    const VirtualMemory* current_thread_stack_usage =
      _current_baseline.virtual_memory(mtThreadStack);

    early_reserved_amount    += early_thread_stack_usage->reserved();
    early_committed_amount   += early_thread_stack_usage->committed();
    current_reserved_amount  += current_thread_stack_usage->reserved();
    current_committed_amount += current_thread_stack_usage->committed();
  } else if (flag == mtNMT) {
    // Count tracking overhead as NMT memory.
    early_reserved_amount    += _early_baseline.malloc_tracking_overhead();
    early_committed_amount   += _early_baseline.malloc_tracking_overhead();
    current_reserved_amount  += _current_baseline.malloc_tracking_overhead();
    current_committed_amount += _current_baseline.malloc_tracking_overhead();
  }

  if (amount_in_current_scale(current_reserved_amount) > 0 ||
      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {

    // Overall diff line for this type.
    out->print("-%26s (", NMTUtil::flag_to_name(flag));
    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
      early_reserved_amount, early_committed_amount);
    out->print_cr(")");

    if (flag == mtClass) {
      // Class count and its change.
      out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
      int class_count_diff = (int)(_current_baseline.class_count() -
          _early_baseline.class_count());
      if (class_count_diff != 0) {
        out->print(" %+d", class_count_diff);
      }
      out->print_cr(")");
    } else if (flag == mtThread) {
      // Thread count and its change, then the stack portion.
      out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());
      int thread_count_diff = (int)(_current_baseline.thread_count() -
          _early_baseline.thread_count());
      if (thread_count_diff != 0) {
        out->print(" %+d", thread_count_diff);
      }
      out->print_cr(")");

      const VirtualMemory* current_thread_stack =
        _current_baseline.virtual_memory(mtThreadStack);
      const VirtualMemory* early_thread_stack =
        _early_baseline.virtual_memory(mtThreadStack);

      out->print("%27s (stack: ", " ");
      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
        early_thread_stack->reserved(), early_thread_stack->committed());
      out->print_cr(")");
    }

    // malloc'd memory diff.
    size_t current_malloc_amount = current_malloc->malloc_size();
    size_t early_malloc_amount   = early_malloc->malloc_size();
    if (amount_in_current_scale(current_malloc_amount) > 0 ||
        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
      out->print("%28s(", " ");
      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
        early_malloc_amount, early_malloc->malloc_count(), mtNone);
      out->print_cr(")");
    }

    // mapped memory diff.
    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
      out->print("%27s (mmap: ", " ");
      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
        early_vm->reserved(), early_vm->committed());
      out->print_cr(")");
    }

    // arena memory diff.
    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
      out->print("%28s(", " ");
      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
        early_malloc->arena_size(), early_malloc->arena_count());
      out->print_cr(")");
    }

    // NMT-only extra line: its own tracking overhead and change.
    if (flag == mtNMT) {
      out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ",
        amount_in_current_scale(_current_baseline.malloc_tracking_overhead()), scale);

      long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
          _early_baseline.malloc_tracking_overhead());
      if (overhead_diff != 0) {
        out->print(" %+ld%s", overhead_diff, scale);
      }
      out->print_cr(")");
    }

    out->print_cr(" ");
  }
}
// Generates the detail diff report: summary diff first, then per-call-site
// malloc and virtual memory diffs.
void MemDetailDiffReporter::report_diff() {
  MemSummaryDiffReporter::report_diff();
  diff_malloc_sites();
  diff_virtual_memory_sites();
}
// Merge-walks the early and current baselines' malloc sites and classifies
// each as new (current only), old (early only) or changed (in both). The
// compare-based merge relies on both iterators yielding sites in the same
// by_site_and_type order.
void MemDetailDiffReporter::diff_malloc_sites() const {
  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type);
  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type);

  const MallocSite* early_site = early_itr.next();
  const MallocSite* current_site = current_itr.next();

  while (early_site != NULL || current_site != NULL) {
    if (early_site == NULL) {
      // Remaining sites exist only in the current baseline.
      new_malloc_site(current_site);
      current_site = current_itr.next();
    } else if (current_site == NULL) {
      // Remaining sites exist only in the early baseline.
      old_malloc_site(early_site);
      early_site = early_itr.next();
    } else {
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
      if (compVal < 0) {
        new_malloc_site(current_site);
        current_site = current_itr.next();
      } else if (compVal > 0) {
        old_malloc_site(early_site);
        early_site = early_itr.next();
      } else {
        // Same call site in both baselines: report the delta.
        diff_malloc_site(early_site, current_site);
        early_site = early_itr.next();
        current_site = current_itr.next();
      }
    }
  }
}
// Merge-walks the early and current baselines' virtual memory sites and
// classifies each as new, old or changed. Like diff_malloc_sites(), the
// merge relies on both iterators yielding sites in the same by_site order.
void MemDetailDiffReporter::diff_virtual_memory_sites() const {
  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);

  const VirtualMemoryAllocationSite* early_site = early_itr.next();
  const VirtualMemoryAllocationSite* current_site = current_itr.next();

  while (early_site != NULL || current_site != NULL) {
    if (early_site == NULL) {
      // Remaining sites exist only in the current baseline.
      new_virtual_memory_site(current_site);
      current_site = current_itr.next();
    } else if (current_site == NULL) {
      // Remaining sites exist only in the early baseline.
      old_virtual_memory_site(early_site);
      early_site = early_itr.next();
    } else {
      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
      if (compVal < 0) {
        new_virtual_memory_site(current_site);
        current_site = current_itr.next();
      } else if (compVal > 0) {
        old_virtual_memory_site(early_site);
        early_site = early_itr.next();
      } else {
        // Same call site in both baselines: report the delta.
        diff_virtual_memory_site(early_site, current_site);
        early_site = early_itr.next();
        current_site = current_itr.next();
      }
    }
  }
}
// Site present only in the current baseline: early size/count are zero.
void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(),
                   malloc_site->count(), 0, 0, malloc_site->flag());
}
// Site present only in the early baseline: current size/count are zero.
void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
  diff_malloc_site(malloc_site->call_stack(), 0, 0,
                   malloc_site->size(), malloc_site->count(), malloc_site->flag());
}
// Diff a site found in both baselines. A site that changed memory type is
// reported as the old site disappearing and a new one appearing.
void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
  const MallocSite* current) const {
  if (early->flag() == current->flag()) {
    // Same type at the same call site: report as a genuine delta.
    diff_malloc_site(current->call_stack(), current->size(), current->count(),
      early->size(), early->count(), early->flag());
  } else {
    // Memory type changed between baselines.
    old_malloc_site(early);
    new_malloc_site(current);
  }
}
// Prints one malloc-site diff: the call stack followed by the size/count
// delta. Skipped when the size delta vanishes at the current scale.
void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
  size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
  outputStream* out = output();

  assert(stack != NULL, "NULL stack");

  if (diff_in_current_scale(current_size, early_size) == 0) {
      return;
  }

  stack->print_on(out);
  out->print("%28s (", " ");
  print_malloc_diff(current_size, current_count,
    early_size, early_count, flags);

  out->print_cr(")\n");
}
// Site present only in the current baseline: early reserved/committed are zero.
void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
  diff_virtual_memory_site(site->call_stack(), site->reserved(),
                           site->committed(), 0, 0, site->flag());
}
// Site present only in the early baseline: current reserved/committed are zero.
void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
  diff_virtual_memory_site(site->call_stack(), 0, 0,
                           site->reserved(), site->committed(), site->flag());
}
// Diff a virtual memory site found in both baselines; their memory type may
// not change (unlike malloc sites, see the assert).
void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
  const VirtualMemoryAllocationSite* current) const {
  assert(early->flag() == current->flag(), "Should be the same");
  diff_virtual_memory_site(current->call_stack(), current->reserved(),
                           current->committed(), early->reserved(),
                           early->committed(), current->flag());
}
// Prints one virtual-memory-site diff: the call stack followed by the
// reserved/committed delta. Skipped when both deltas vanish at the current
// scale.
void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
  size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const {
  outputStream* out = output();

  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
      diff_in_current_scale(current_committed, early_committed) == 0) {
    return;
  }

  stack->print_on(out);
  out->print("%28s (mmap: ", " ");
  print_virtual_memory_diff(current_reserved, current_committed,
    early_reserved, early_committed);

  // Untyped reservations print no type tag.
  if (flag != mtNone) {
    out->print(" Type=%s", NMTUtil::flag_to_name(flag));
  }

  out->print_cr(")\n");
}
C:\hotspot-69087d08d473\src\share\vm/services/memReporter.hpp
#ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
#define SHARE_VM_SERVICES_MEM_REPORTER_HPP
#if INCLUDE_NMT
#include "oops/instanceKlass.hpp"
#include "services/memBaseline.hpp"
#include "services/nmtCommon.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
// Shared base for all NMT reporters: holds the destination stream and the
// unit scale, and provides the common scaling/formatting helpers.
class MemReporterBase : public StackObj {
 private:
  size_t        _scale;   // report amounts in this scale (default K)
  outputStream* _output;  // destination stream; tty when none is given

 public:
  MemReporterBase(outputStream* out = NULL, size_t scale = K)
    : _scale(scale) {
    _output = (out == NULL) ? tty : out;
  }

 protected:
  inline outputStream* output() const {
    return _output;
  }
  // Unit name of the current scale (per NMTUtil).
  inline const char* current_scale() const {
    return NMTUtil::scale_name(_scale);
  }
  // Convert a byte amount into the current scale.
  inline size_t amount_in_current_scale(size_t amount) const {
    return NMTUtil::amount_in_scale(amount, _scale);
  }
  // Convert a byte difference (s1 - s2) into the current scale, rounding
  // half away from zero so small-but-real changes stay visible.
  inline long diff_in_current_scale(size_t s1, size_t s2) const {
    long amount = (long)(s1 - s2);
    long scale = (long)_scale;
    amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
    return amount / scale;
  }

  // Aggregation helpers over malloc'd plus mapped memory.
  size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
  size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;

  // Shared print helpers used by the concrete reporters.
  void print_total(size_t reserved, size_t committed) const;
  void print_malloc(size_t amount, size_t count, MEMFLAGS flag = mtNone) const;
  void print_virtual_memory(size_t reserved, size_t committed) const;

  void print_malloc_line(size_t amount, size_t count) const;
  void print_virtual_memory_line(size_t reserved, size_t committed) const;
  void print_arena_line(size_t amount, size_t count) const;

  void print_virtual_memory_region(const char* type, address base, size_t size) const;
};
// Generates a summary report (grand total plus per-type breakdown) from a
// single baseline. Only snapshot pointers are kept, so the baseline must
// outlive the reporter.
class MemSummaryReporter : public MemReporterBase {
 private:
  MallocMemorySnapshot*  _malloc_snapshot;
  VirtualMemorySnapshot* _vm_snapshot;
  size_t                 _class_count;  // number of loaded classes at baseline time

 public:
  MemSummaryReporter(MemBaseline& baseline, outputStream* output,
    size_t scale = K) : MemReporterBase(output, scale),
    _malloc_snapshot(baseline.malloc_memory_snapshot()),
    _vm_snapshot(baseline.virtual_memory_snapshot()),
    _class_count(baseline.class_count()) { }

  // Generate the summary report.
  virtual void report();

 private:
  // Report one memory type; folds mtThreadStack into mtThread and NMT's own
  // overhead into mtNMT.
  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
    VirtualMemory* virtual_memory);
};
// Summary report plus the virtual memory map and per-call-site detail
// sections. The baseline reference must outlive the reporter.
class MemDetailReporter : public MemSummaryReporter {
 private:
  MemBaseline& _baseline;

 public:
  MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
    MemSummaryReporter(baseline, output, scale),
    _baseline(baseline) { }

  // Summary first, then the memory map, then allocation-site details.
  virtual void report() {
    MemSummaryReporter::report();
    report_virtual_memory_map();
    report_detail();
  }

 private:
  // Emit the detail section: malloc and mmap call sites.
  void report_detail();
  // Emit the virtual memory map (reserved regions and committed parts).
  void report_virtual_memory_map();
  // Emit malloc call sites sorted by size.
  void report_malloc_sites();
  // Emit virtual memory reservation sites sorted by size.
  void report_virtual_memory_allocation_sites();
  // Emit a single reserved region with its committed sub-regions.
  void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
};
// Reports the summary-level difference between two baselines (an "early"
// and a "current" one). Both must already be baselined; see the asserts.
class MemSummaryDiffReporter : public MemReporterBase {
 protected:
  MemBaseline& _early_baseline;
  MemBaseline& _current_baseline;

 public:
  MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
    outputStream* output, size_t scale = K) : MemReporterBase(output, scale),
    _early_baseline(early_baseline), _current_baseline(current_baseline) {
    assert(early_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
    assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
  }

  // Generate the summary diff report.
  virtual void report_diff();

 private:
  // Diff one memory type between the two baselines.
  void diff_summary_of_type(MEMFLAGS type,
    const MallocMemory* early_malloc, const VirtualMemory* early_vm,
    const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;

 protected:
  // Print helpers shared with the detail diff reporter.
  void print_malloc_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count, MEMFLAGS flags) const;
  void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
    size_t early_reserved, size_t early_committed) const;
  void print_arena_diff(size_t current_amount, size_t current_count,
    size_t early_amount, size_t early_count) const;
};
// Detail-level diff between two baselines: the summary diff plus
// per-call-site malloc and virtual memory diffs.
// Fix: corrected the parameter-name typo "currrent_count" -> "current_count"
// in the raw diff_malloc_site declaration (declaration-only name; no ABI or
// caller impact).
class MemDetailDiffReporter : public MemSummaryDiffReporter {
 public:
  MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
    outputStream* output, size_t scale = K) :
    MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }

  // Generate the report: summary diff followed by site-level diffs.
  virtual void report_diff();

  // Merge-walk both baselines' site lists and classify every site.
  void diff_malloc_sites() const;
  void diff_virtual_memory_sites() const;

  // Sites present in only one baseline, or in both.
  void new_malloc_site (const MallocSite* site) const;
  void old_malloc_site (const MallocSite* site) const;
  void diff_malloc_site(const MallocSite* early, const MallocSite* current) const;
  void new_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
  void old_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
  void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
    const VirtualMemoryAllocationSite* current) const;

  // Raw-value printers used by the classification helpers above.
  void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
    size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const;
  void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
    size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const;
};
#endif // INCLUDE_NMT
#endif
C:\hotspot-69087d08d473\src\share\vm/services/memTracker.cpp
#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"
// Whether the native stack walker may be used; disabled on Solaris
// (NOTE(review): presumably until the VM is sufficiently initialized there —
// confirm against the platform code).
#ifdef SOLARIS
volatile bool NMT_stack_walkable = false;
#else
volatile bool NMT_stack_walkable = true;
#endif

// Current tracking level and the level requested on the command line; both
// start as NMT_unknown and are resolved lazily by MemTracker::tracking_level().
volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;           // baseline used for diff reports
Mutex* MemTracker::_query_lock = NULL;       // serializes NMT queries
bool MemTracker::_is_nmt_env_valid = true;   // env/option value parsed OK
// Determines the initial tracking level from the NMT_LEVEL_<pid> environment
// variable handed over by the launcher and bootstraps the trackers. Falls
// back to NMT_off when the variable is absent or "off"; an unrecognized
// value is remembered and reported later via verify_nmt_option().
NMT_TrackingLevel MemTracker::init_tracking_level() {
  NMT_TrackingLevel level = NMT_off;
  char buf[64];
  char nmt_option[64];
  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
    if (strcmp(nmt_option, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_option, "detail") == 0) {
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
      level = NMT_detail;
#else
      // Detail needs native stack walking; downgrade to summary.
      level = NMT_summary;
#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    } else if (strcmp(nmt_option, "off") != 0) {
      // Any other value is invalid; flag it for later reporting.
      _is_nmt_env_valid = false;
    }

    // Don't leak the variable to child processes.
    os::unsetenv(buf);
  }

  // Construct the shared empty call stack in place before any tracking call
  // can reference it.
  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    // Tracker bootstrap failed; run with tracking off.
    level = NMT_off;
  }
  return level;
}
void MemTracker::init() {
NMT_TrackingLevel level = tracking_level();
if (level >= NMT_summary) {
if (!VirtualMemoryTracker::late_initialize(level)) {
shutdown();
return;
}
_query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
if (_query_lock == NULL) {
shutdown();
}
}
}
// Verifies that the level picked up from the environment variable matches
// the -XX:NativeMemoryTracking value passed by the launcher. Returns false
// on a mismatch; an unrecognized value only flags the option as invalid
// (reported later by verify_nmt_option()).
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    jio_fprintf(defaultStream::error_stream(),
      "NMT detail is not supported on this platform. Using NMT summary instead.\n");
    // Detail was downgraded to summary at init time on this platform.
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
#else
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
#endif
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}
// Returns false when the NMT environment variable or the
// -XX:NativeMemoryTracking value could not be parsed.
bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}
// Maps a caller-visible malloc pointer back to the base of the underlying
// allocation as tracked by MallocTracker.
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}
// Record an uncommit or release of [addr, addr + size); a no-op below
// summary level.
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  if (_type == uncommit) {
    VirtualMemoryTracker::remove_uncommitted_region(addr, size);
  } else if (_type == release) {
    VirtualMemoryTracker::remove_released_region(addr, size);
  } else {
    ShouldNotReachHere();
  }
}
// Shut down tracking by downgrading to NMT_minimal (a no-op when already at
// or below that level).
void MemTracker::shutdown() {
  if (tracking_level() <= NMT_minimal) return;
  transition_to(NMT_minimal);
}
// Transition the tracking level. Only downgrades perform work: the new
// level is published first (fenced) so new requests observe it before the
// trackers release the old level's resources. Staying at the same level or
// requesting an upgrade is a no-op; always returns true.
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level > level) {
    // Downgrade: publish the new level before tearing anything down.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
  }
  return true;
}
// Take a fresh baseline and print a summary or detail report to 'output'.
// Nothing is printed when baselining fails.
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (!baseline.baseline(summary_only)) {
    return;  // baselining failed; nothing to report
  }
  if (summary_only) {
    MemSummaryReporter rpt(baseline, output);
    rpt.report();
  } else {
    MemDetailReporter rpt(baseline, output);
    rpt.report();
  }
}
// MallocSiteWalker that gathers hash-table quality statistics over the
// malloc site table: entry counts, hash-bucket length distribution and call
// stack depth distribution. Entries are assumed to arrive grouped by bucket,
// so a change of bucket index ends the previous bucket's run.
// Fix: report_statistics() had two branches (index < 9 vs. else) printing
// the identical format string; they are collapsed into one.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // Bucket lengths above this are aggregated into one counter.
    report_threshold = 20
  };

 private:
  int _empty_entries;   // sites with zero outstanding bytes
  int _total_entries;
  int _stack_depth_distribution[NMT_TrackingStackDepth];
  int _hash_distribution[report_threshold];
  int _bucket_over_threshold;

  int _current_hash_bucket;   // bucket of the run being measured (-1 = none yet)
  int _current_bucket_length;
  int _used_buckets;
  int _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool at(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // Stack depth distribution (frames assumed >= 1 — TODO confirm).
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // Hash distribution: a new bucket index closes the previous run.
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      // First entry.
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      // Same bucket as previous entry.
      _current_bucket_length ++;
    } else {
      // Moved to a new bucket; account the finished run.
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // Must be called after the walk to flush the final bucket's run.
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    // NOTE(review): prints a NaN/inf percentage when the table is empty
    // (_total_entries == 0) — assumed not to happen in practice.
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr(" %d entry: %d", 1, _hash_distribution[0]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
  // Account one completed bucket run into the distribution.
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};
// Prints malloc site table statistics (table size, bucket and stack-depth
// distributions) for tuning NMT itself.
void MemTracker::tuning_statistics(outputStream* out) {
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  // Flush the walker's final bucket run.
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}
C:\hotspot-69087d08d473\src\share\vm/services/memTracker.hpp
#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"
#if !INCLUDE_NMT
#define CURRENT_PC NativeCallStack::empty_stack()
#define CALLER_PC NativeCallStack::empty_stack()
// No-op stand-in for the virtual memory tracker helper when NMT is
// compiled out.
class Tracker : public StackObj {
 public:
  Tracker() { }
  void record(address addr, size_t size) { }
};
// No-op stand-in for MemTracker when NMT is compiled out: every recording
// call does nothing, the level is permanently NMT_off, and the pass-through
// helpers return their argument unchanged so allocation paths keep working.
class MemTracker : AllStatic {
 public:
  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
  static inline void shutdown() { }
  static inline void init() { }
  static bool check_launcher_nmt_support(const char* value) { return true; }
  static bool verify_nmt_option() { return true; }

  // Pass-through: the block base is the user pointer (no NMT header).
  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
  static inline size_t malloc_header_size(void* memblock) { return 0; }
  static inline void* malloc_base(void* memblock) { return memblock; }
  static inline void* record_free(void* memblock) { return memblock; }

  static inline void record_new_arena(MEMFLAGS flag) { }
  static inline void record_arena_free(MEMFLAGS flag) { }
  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) { }
  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
  static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
  static inline void record_thread_stack(void* addr, size_t size) { }
  static inline void release_thread_stack(void* addr, size_t size) { }

  static void final_report(outputStream*) { }
  static void error_report(outputStream*) { }
};
#else
#include "runtime/atomic.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
extern volatile bool NMT_stack_walkable;
#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
NativeCallStack(0, true) : NativeCallStack::empty_stack())
#define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
NativeCallStack(1, true) : NativeCallStack::empty_stack())
class MemBaseline;
class Mutex;
// Scoped helper for recording virtual memory uncommit/release. Holds
// ThreadCritical for its whole lifetime — presumably so the OS operation
// and the record() bookkeeping stay consistent with respect to other
// tracked threads (see the trailing _tc member).
class Tracker : public StackObj {
 public:
  enum TrackerType {
    uncommit,
    release
  };

 public:
  Tracker(enum TrackerType type) : _type(type) { }
  // Record the uncommitted/released region [addr, addr + size).
  void record(address addr, size_t size);
 private:
  enum TrackerType _type;
  // Scoped lock held for the tracker's lifetime.
  ThreadCritical _tc;
};
// Full Native Memory Tracking (NMT) front end, compiled when NMT is
// included in the build. All-static: recording calls are dispatched to
// MallocTracker / VirtualMemoryTracker once the current tracking level
// permits them. Virtual-memory recording uses a double-checked pattern:
// the level is tested before and again after taking ThreadCritical, in
// case tracking is shut down concurrently.
class MemTracker : AllStatic {
public:
// Current tracking level; lazily initialized on first query from the
// command line / environment via init_tracking_level().
static inline NMT_TrackingLevel tracking_level() {
if (_tracking_level == NMT_unknown) {
_tracking_level = init_tracking_level();
_cmdline_tracking_level = _tracking_level;
}
return _tracking_level;
}
static void init();
// Stop tracking and release tracker resources.
static void shutdown();
static bool check_launcher_nmt_support(const char* value);
static bool verify_nmt_option();
// Change the active tracking level.
static bool transition_to(NMT_TrackingLevel level);
// Annotate a freshly allocated block; returns the user-visible pointer.
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
const NativeCallStack& stack, NMT_TrackingLevel level) {
return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
}
// Size of the per-block bookkeeping header for a given tracking level.
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
return MallocTracker::malloc_header_size(level);
}
// Header size for an existing block; zero once tracking is fully off.
static size_t malloc_header_size(void* memblock) {
if (tracking_level() != NMT_off) {
return MallocTracker::get_header_size(memblock);
}
return 0;
}
// Recover the underlying allocation base from a user pointer.
static void* malloc_base(void* memblock);
// Record a free; returns the pointer that should actually be freed.
static inline void* record_free(void* memblock) {
return MallocTracker::record_free(memblock);
}
// Arena lifecycle bookkeeping; no-ops below summary level.
static inline void record_new_arena(MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_new_arena(flag);
}
static inline void record_arena_free(MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_arena_free(flag);
}
static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
MallocTracker::record_arena_size_change(diff, flag);
}
// Record a virtual memory reservation. Level is re-checked under
// ThreadCritical in case tracking was shut down between the checks.
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
}
}
// As above, but the region is also marked committed (final 'true' arg).
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size,
stack, flag, true);
}
}
// Record committing part of an already reserved region.
static inline void record_virtual_memory_commit(void* addr, size_t size,
const NativeCallStack& stack) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}
// Uncommit/release are tracked through a Tracker object which holds the
// lock across the OS call; callers must have verified the level first.
static inline Tracker get_virtual_memory_uncommit_tracker() {
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::uncommit);
}
static inline Tracker get_virtual_memory_release_tracker() {
assert(tracking_level() >= NMT_summary, "Check by caller");
return Tracker(Tracker::release);
}
// Re-tag an existing reserved region with a memory type.
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
}
}
// Track a thread stack as mtThreadStack virtual memory. The size-0
// malloc record presumably only bumps the mtThreadStack allocation
// count -- TODO(review): confirm against MallocMemorySummary.
static inline void record_thread_stack(void* addr, size_t size) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
MallocMemorySummary::record_malloc(0, mtThreadStack);
record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
}
}
// Undo record_thread_stack() when a thread stack goes away.
static inline void release_thread_stack(void* addr, size_t size) {
if (tracking_level() < NMT_summary) return;
if (addr != NULL) {
MallocMemorySummary::record_free(0, mtThreadStack);
ThreadCritical tc;
if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::remove_released_region((address)addr, size);
}
}
// Lock serializing diagnostic queries (see NMTDCmd::execute()).
static inline Mutex* query_lock() { return _query_lock; }
// Summary-only report for VM error reporting paths.
static void error_report(outputStream* output) {
if (tracking_level() >= NMT_summary) {
report(true, output); // just print summary for error case.
}
}
// Report emitted at VM exit; detail level gets the detailed report.
static void final_report(outputStream* output) {
NMT_TrackingLevel level = tracking_level();
if (level >= NMT_summary) {
report(level == NMT_summary, output);
}
}
// The single baseline slot used by the baseline/diff diagnostic commands.
static inline MemBaseline& get_baseline() {
return _baseline;
}
// Level originally requested on the command line (the active level may
// have been downgraded since).
static NMT_TrackingLevel cmdline_tracking_level() {
return _cmdline_tracking_level;
}
static void tuning_statistics(outputStream* out);
private:
static NMT_TrackingLevel init_tracking_level();
static void report(bool summary_only, outputStream* output);
private:
static volatile NMT_TrackingLevel _tracking_level;
static bool _is_nmt_env_valid;
static NMT_TrackingLevel _cmdline_tracking_level;
static MemBaseline _baseline;
static Mutex* _query_lock;
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
C:\hotspot-69087d08d473\src\share\vm/services/nmtCommon.cpp
#include "precompiled.hpp"
#include "services/nmtCommon.hpp"
// Human-readable names for the NMT memory types. Entry order must match
// the MEMFLAGS enum values: NMTUtil::flag_to_name() indexes this table
// directly with flag_to_index(), and the array is declared with
// mt_number_of_types entries.
const char* NMTUtil::_memory_type_names[] = {
"Java Heap",
"Class",
"Thread",
"Thread Stack",
"Code",
"GC",
"Compiler",
"Internal",
"Other",
"Symbol",
"Native Memory Tracking",
"Shared class space",
"Arena Chunk",
"Test",
"Tracing",
"Unknown"
};
// Map a numeric scale factor (K, M or G) to its display suffix.
// Any other value is a caller bug.
const char* NMTUtil::scale_name(size_t scale) {
  if (scale == K) return "KB";
  if (scale == M) return "MB";
  if (scale == G) return "GB";
  ShouldNotReachHere();
  return NULL;
}
// Parse a scale name ("KB"/"kb", "MB"/"mb", "GB"/"gb" -- only the first two
// characters are examined) into its numeric factor.
// Returns 0 for any unrecognized name so callers can reject the input.
// Fix: removed the unreachable 'return K;' that followed the exhaustive
// if/else chain (every branch already returns).
size_t NMTUtil::scale_from_name(const char* scale) {
  assert(scale != NULL, "Null pointer check");
  if (strncmp(scale, "KB", 2) == 0 ||
      strncmp(scale, "kb", 2) == 0) {
    return K;
  } else if (strncmp(scale, "MB", 2) == 0 ||
             strncmp(scale, "mb", 2) == 0) {
    return M;
  } else if (strncmp(scale, "GB", 2) == 0 ||
             strncmp(scale, "gb", 2) == 0) {
    return G;
  } else {
    return 0; // Invalid value
  }
}
C:\hotspot-69087d08d473\src\share\vm/services/nmtCommon.hpp
#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP
#define SHARE_VM_SERVICES_NMT_COMMON_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
#ifdef _LP64
typedef jlong MemoryCounterType;
#else
typedef jint MemoryCounterType;
#endif
// Native Memory Tracking levels. Values are ordered by increasing detail
// so code can compare them with relational operators
// (e.g. tracking_level() < NMT_summary).
enum NMT_TrackingLevel {
NMT_unknown = 0xFF,  // not yet initialized from the command line
NMT_off = 0x00,      // tracking disabled
NMT_minimal = 0x01,  // post-shutdown state: tracking no longer active
NMT_summary = 0x02,  // per-category totals only
NMT_detail = 0x03    // totals plus per-callsite data
};
const int NMT_TrackingStackDepth = 4;
// Small all-static helper collection shared by the NMT implementation:
// MEMFLAGS <-> name/index conversions and report-scale handling.
class NMTUtil : AllStatic {
public:
// Index of a memory type flag into _memory_type_names.
static inline int flag_to_index(MEMFLAGS flag) {
return (flag & 0xff);
}
// Display name for a memory type (see _memory_type_names ordering).
static const char* flag_to_name(MEMFLAGS flag) {
return _memory_type_names[flag_to_index(flag)];
}
// Inverse of flag_to_index(). No bounds check -- callers must pass a
// valid index.
static MEMFLAGS index_to_flag(int index) {
return (MEMFLAGS)index;
}
// "KB"/"MB"/"GB" <-> K/M/G conversions for report output.
static const char* scale_name(size_t scale);
static size_t scale_from_name(const char* scale);
// Convert a byte amount into the given scale, rounding to nearest.
static size_t amount_in_scale(size_t amount, size_t scale) {
return (amount + scale / 2) / scale;
}
private:
static const char* _memory_type_names[mt_number_of_types];
};
#endif
C:\hotspot-69087d08d473\src\share\vm/services/nmtDCmd.cpp
#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/nmtDCmd.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/globalDefinitions.hpp"
// Construct the VM.native_memory diagnostic command and register its
// options with the parser. All sub-command options are booleans that
// default to false; 'scale' selects the reporting unit (KB/MB/GB) and
// defaults to KB.
// Fix: corrected the typo "subsytem" -> "subsystem" in the user-visible
// help text for the 'summary' option.
NMTDCmd::NMTDCmd(outputStream* output,
                 bool heap): DCmdWithParser(output, heap),
  _summary("summary", "request runtime to report current memory summary, " \
           "which includes total reserved and committed memory, along " \
           "with memory usage summary by each subsystem.",
           "BOOLEAN", false, "false"),
  _detail("detail", "request runtime to report memory allocation >= "
          "1K by each callsite.",
          "BOOLEAN", false, "false"),
  _baseline("baseline", "request runtime to baseline current memory usage, " \
            "so it can be compared against in later time.",
            "BOOLEAN", false, "false"),
  _summary_diff("summary.diff", "request runtime to report memory summary " \
                "comparison against previous baseline.",
                "BOOLEAN", false, "false"),
  _detail_diff("detail.diff", "request runtime to report memory detail " \
               "comparison against previous baseline, which shows the memory " \
               "allocation activities at different callsites.",
               "BOOLEAN", false, "false"),
  _shutdown("shutdown", "request runtime to shutdown itself and free the " \
            "memory used by runtime.",
            "BOOLEAN", false, "false"),
  _statistics("statistics", "print tracker statistics for tuning purpose.", \
              "BOOLEAN", false, "false"),
  _scale("scale", "Memory usage in which scale, KB, MB or GB",
         "STRING", false, "KB") {
  _dcmdparser.add_dcmd_option(&_summary);
  _dcmdparser.add_dcmd_option(&_detail);
  _dcmdparser.add_dcmd_option(&_baseline);
  _dcmdparser.add_dcmd_option(&_summary_diff);
  _dcmdparser.add_dcmd_option(&_detail_diff);
  _dcmdparser.add_dcmd_option(&_shutdown);
  _dcmdparser.add_dcmd_option(&_statistics);
  _dcmdparser.add_dcmd_option(&_scale);
}
// Translate the user-supplied scale string into a numeric factor.
// NULL means "no scale given"; otherwise parsing is delegated to NMTUtil,
// which returns 0 for invalid names.
size_t NMTDCmd::get_scale(const char* scale) const {
  return (scale == NULL) ? 0 : NMTUtil::scale_from_name(scale);
}
// Entry point for the VM.native_memory diagnostic command. Validates the
// tracker state and the option combination, then dispatches to the
// requested sub-command (summary/detail report, baseline, diffs, shutdown
// or tuning statistics).
// Fix: the mutual-exclusion error message now lists 'statistics', which
// was counted in nopt but missing from the message text.
void NMTDCmd::execute(DCmdSource source, TRAPS) {
  // Nothing can be served when tracking is off or already shut down.
  if (MemTracker::tracking_level() == NMT_off) {
    output()->print_cr("Native memory tracking is not enabled");
    return;
  } else if (MemTracker::tracking_level() == NMT_minimal) {
    output()->print_cr("Native memory tracking has been shutdown");
    return;
  }
  // Validate the requested output scale (KB/MB/GB); 0 means unparseable.
  const char* scale_value = _scale.value();
  size_t scale_unit = get_scale(scale_value);
  if (scale_unit == 0) {
    output()->print_cr("Incorrect scale value: %s", scale_value);
    return;
  }
  // The sub-command options are mutually exclusive; count requests.
  int nopt = 0;
  if (_summary.is_set() && _summary.value()) { ++nopt; }
  if (_detail.is_set() && _detail.value()) { ++nopt; }
  if (_baseline.is_set() && _baseline.value()) { ++nopt; }
  if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
  if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
  if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
  if (_statistics.is_set() && _statistics.value()) { ++nopt; }
  if (nopt > 1) {
    output()->print_cr("At most one of the following option can be specified: " \
      "summary, detail, baseline, summary.diff, detail.diff, shutdown, statistics");
    return;
  } else if (nopt == 0) {
    if (_summary.is_set()) {
      // 'summary=false' was given explicitly: nothing to do.
      output()->print_cr("No command to execute");
      return;
    } else {
      // No option at all defaults to a summary report.
      _summary.set_value(true);
    }
  }
  // Serialize concurrent diagnostic queries against the tracker.
  MutexLocker locker(MemTracker::query_lock());
  if (_summary.value()) {
    report(true, scale_unit);
  } else if (_detail.value()) {
    if (!check_detail_tracking_level(output())) {
      return;
    }
    report(false, scale_unit);
  } else if (_baseline.value()) {
    // Capture a new baseline; summary-only unless tracking at detail level.
    MemBaseline& baseline = MemTracker::get_baseline();
    if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) {
      output()->print_cr("Baseline failed");
    } else {
      output()->print_cr("Baseline succeeded");
    }
  } else if (_summary_diff.value()) {
    // A summary diff works against either a summary or a detail baseline.
    MemBaseline& baseline = MemTracker::get_baseline();
    if (baseline.baseline_type() >= MemBaseline::Summary_baselined) {
      report_diff(true, scale_unit);
    } else {
      output()->print_cr("No baseline for comparison");
    }
  } else if (_detail_diff.value()) {
    if (!check_detail_tracking_level(output())) {
      return;
    }
    // A detail diff requires a detail baseline specifically.
    MemBaseline& baseline = MemTracker::get_baseline();
    if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
      report_diff(false, scale_unit);
    } else {
      output()->print_cr("No detail baseline for comparison");
    }
  } else if (_shutdown.value()) {
    MemTracker::shutdown();
    output()->print_cr("Native memory tracking has been turned off");
  } else if (_statistics.value()) {
    if (check_detail_tracking_level(output())) {
      MemTracker::tuning_statistics(output());
    }
  } else {
    // All option combinations are handled above.
    ShouldNotReachHere();
    output()->print_cr("Unknown command");
  }
}
// Number of arguments this command accepts. A throw-away instance is
// created purely so its parser can be asked for the option count.
int NMTDCmd::num_arguments() {
  ResourceMark rm;
  NMTDCmd* dcmd = new NMTDCmd(NULL, false);
  if (dcmd == NULL) {
    return 0;
  }
  DCmdMark mark(dcmd);
  return dcmd->_dcmdparser.num_arguments();
}
void NMTDCmd::report(bool summaryOnly, size_t scale_unit) {
MemBaseline baseline;
if (baseline.baseline(summaryOnly)) {
if (summaryOnly) {
MemSummaryReporter rpt(baseline, output(), scale_unit);
rpt.report();
} else {
MemDetailReporter rpt(baseline, output(), scale_unit);
rpt.report();
}
}
}
void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) {
MemBaseline& early_baseline = MemTracker::get_baseline();
assert(early_baseline.baseline_type() != MemBaseline::Not_baselined,
"Not yet baselined");
assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined,
"Not a detail baseline");
MemBaseline baseline;
if (baseline.baseline(summaryOnly)) {
if (summaryOnly) {
MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
rpt.report_diff();
} else {
MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
rpt.report_diff();
}
}
}
// Detail data is only available while tracking at NMT_detail level.
// Returns true when detail requests can be served; otherwise prints a
// message distinguishing "was detail but got downgraded" from "detail was
// never enabled" and returns false.
bool NMTDCmd::check_detail_tracking_level(outputStream* out) {
  if (MemTracker::tracking_level() == NMT_detail) {
    return true;
  }
  if (MemTracker::cmdline_tracking_level() == NMT_detail) {
    out->print_cr("Tracking level has been downgraded due to lack of resources");
  } else {
    out->print_cr("Detail tracking is not enabled");
  }
  return false;
}
C:\hotspot-69087d08d473\src\share\vm/services/nmtDCmd.hpp
#ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
#define SHARE_VM_SERVICES_NMT_DCMD_HPP
#if INCLUDE_NMT
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
#include "services/memBaseline.hpp"
#include "services/mallocTracker.hpp"
// Diagnostic command "VM.native_memory": exposes the Native Memory Tracker
// through jcmd -- summary/detail reports, baselining, diff reports against
// a baseline, tracker shutdown and tuning statistics.
class NMTDCmd: public DCmdWithParser {
protected:
DCmdArgument<bool> _summary;       // print a summary report
DCmdArgument<bool> _detail;        // print a per-callsite detail report
DCmdArgument<bool> _baseline;      // capture a baseline of current usage
DCmdArgument<bool> _summary_diff;  // summary diff against the baseline
DCmdArgument<bool> _detail_diff;   // detail diff against the baseline
DCmdArgument<bool> _shutdown;      // turn tracking off and free resources
DCmdArgument<bool> _statistics;    // print tracker tuning statistics
DCmdArgument<char*> _scale;        // output unit: KB, MB or GB
public:
NMTDCmd(outputStream* output, bool heap);
static const char* name() { return "VM.native_memory"; }
static const char* description() {
return "Print native memory usage";
}
static const char* impact() {
return "Medium";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", NULL};
return p;
}
static int num_arguments();
virtual void execute(DCmdSource source, TRAPS);
private:
// Helpers used by execute(); see the .cpp for details.
void report(bool summaryOnly, size_t scale);
void report_diff(bool summaryOnly, size_t scale);
size_t get_scale(const char* scale) const;
bool check_detail_tracking_level(outputStream* out);
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
C:\hotspot-69087d08d473\src\share\vm/services/psMemoryPool.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
#include "services/memoryManager.hpp"
#include "services/psMemoryPool.hpp"
// Memory pool wrapping the ParallelScavenge old generation. The pool's
// initial size is the generation's current capacity; its maximum is the
// generation's entire reserved range.
PSGenerationPool::PSGenerationPool(PSOldGen* gen,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
}
// Snapshot the old generation's usage. The maximum is reported as 0 while
// the pool is not available for allocation; committed is the generation's
// current capacity.
MemoryUsage PSGenerationPool::get_memory_usage() {
  size_t max_bytes = 0;
  if (available_for_allocation()) {
    max_bytes = max_size();
  }
  const size_t used_bytes      = used_in_bytes();
  const size_t committed_bytes = _gen->capacity_in_bytes();
  return MemoryUsage(initial_size(), used_bytes, committed_bytes, max_bytes);
}
// Memory pool wrapping the eden space of a ParallelScavenge young
// generation. Initial size is the space's current capacity; maximum is the
// generation's max size less both survivor-space capacities (i.e. the most
// eden could ever grow to).
EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,
MutableSpace* space,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, space->capacity_in_bytes(),
(gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
support_usage_threshold),
_gen(gen), _space(space) {
}
// Snapshot eden usage. The maximum is reported as 0 while the pool is not
// available for allocation; committed is the space's current capacity.
MemoryUsage EdenMutableSpacePool::get_memory_usage() {
  size_t max_bytes = 0;
  if (available_for_allocation()) {
    max_bytes = max_size();
  }
  const size_t used_bytes      = used_in_bytes();
  const size_t committed_bytes = _space->capacity_in_bytes();
  return MemoryUsage(initial_size(), used_bytes, committed_bytes, max_bytes);
}
// Memory pool for the survivor spaces of a ParallelScavenge young
// generation. Both initial and maximum size are the from-space capacity --
// only one survivor space holds live data at a time, so only from-space is
// accounted (NOTE(review): inferred from used_in_bytes()/committed_in_bytes()
// reading from_space only -- confirm).
SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,
const char* name,
PoolType type,
bool support_usage_threshold) :
CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),
gen->from_space()->capacity_in_bytes(),
support_usage_threshold), _gen(gen) {
}
// Snapshot survivor-space usage (from-space only). The maximum is reported
// as 0 while the pool is not available for allocation.
MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {
  size_t max_bytes = 0;
  if (available_for_allocation()) {
    max_bytes = max_size();
  }
  const size_t used_bytes      = used_in_bytes();
  const size_t committed_bytes = committed_in_bytes();
  return MemoryUsage(initial_size(), used_bytes, committed_bytes, max_bytes);
}
C:\hotspot-69087d08d473\src\share\vm/services/psMemoryPool.hpp
#ifndef SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
#define SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/heap.hpp"
#include "memory/space.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryUsage.hpp"
#endif // INCLUDE_ALL_GCS
// Memory pool exposing the ParallelScavenge old generation to the
// java.lang.management memory MXBeans.
// Fix: constructor parameter renamed 'pool' -> 'gen' to match the
// out-of-line definition in psMemoryPool.cpp (declaration-only rename,
// no effect on callers).
class PSGenerationPool : public CollectedMemoryPool {
private:
  PSOldGen* _gen;  // the wrapped old generation
public:
  PSGenerationPool(PSOldGen* gen, const char* name, PoolType type, bool support_usage_threshold);
  MemoryUsage get_memory_usage();
  // Bytes currently in use in the old generation.
  size_t used_in_bytes() { return _gen->used_in_bytes(); }
  // Upper bound: the generation's entire reserved range.
  size_t max_size() const { return _gen->reserved().byte_size(); }
};
// Memory pool exposing the eden space of a ParallelScavenge young
// generation to the java.lang.management memory MXBeans.
class EdenMutableSpacePool : public CollectedMemoryPool {
private:
PSYoungGen* _gen;     // owning young generation (for max_size())
MutableSpace* _space; // the eden space itself
public:
EdenMutableSpacePool(PSYoungGen* gen,
MutableSpace* space,
const char* name,
PoolType type,
bool support_usage_threshold);
MutableSpace* space() { return _space; }
MemoryUsage get_memory_usage();
size_t used_in_bytes() { return space()->used_in_bytes(); }
// Largest possible eden: the generation's max size minus the capacity of
// both survivor spaces.
size_t max_size() const {
return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
}
};
// Memory pool exposing the survivor spaces of a ParallelScavenge young
// generation. All accounting reads from-space only (presumably because a
// single survivor space holds live data at a time -- confirm).
class SurvivorMutableSpacePool : public CollectedMemoryPool {
private:
PSYoungGen* _gen;
public:
SurvivorMutableSpacePool(PSYoungGen* gen,
const char* name,
PoolType type,
bool support_usage_threshold);
MemoryUsage get_memory_usage();
size_t used_in_bytes() {
return _gen->from_space()->used_in_bytes();
}
// Committed and maximum are both the from-space capacity.
size_t committed_in_bytes() {
return _gen->from_space()->capacity_in_bytes();
}
size_t max_size() const {
return _gen->from_space()->capacity_in_bytes();
}
};
#endif // SHARE_VM_SERVICES_PSMEMORYPOOL_HPP
C:\hotspot-69087d08d473\src\share\vm/services/runtimeService.cpp
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "services/attachListener.hpp"
#include "services/management.hpp"
#include "services/runtimeService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#ifndef USDT2
HS_DTRACE_PROBE_DECL(hs_private, safepoint__begin);
HS_DTRACE_PROBE_DECL(hs_private, safepoint__end);
#endif /* !USDT2 */
#if INCLUDE_MANAGEMENT
// Timer for time spent in application code (printed at safepoint begin
// when PrintGCApplicationConcurrentTime is set).
TimeStamp RuntimeService::_app_timer;
// Timer started when a safepoint begins.
TimeStamp RuntimeService::_safepoint_timer;
// sun.rt.* performance counters; created in init() when UsePerfData is on.
PerfCounter* RuntimeService::_sync_time_ticks = NULL;
PerfCounter* RuntimeService::_total_safepoints = NULL;
PerfCounter* RuntimeService::_safepoint_time_ticks = NULL;
PerfCounter* RuntimeService::_application_time_ticks = NULL;
PerfCounter* RuntimeService::_thread_interrupt_signaled_count = NULL;
PerfCounter* RuntimeService::_interrupted_before_count = NULL;
PerfCounter* RuntimeService::_interrupted_during_count = NULL;
// Sync time of the most recent safepoint, in seconds; reset at each begin.
double RuntimeService::_last_safepoint_sync_time_sec = 0.0;
// Initialize the runtime service: set up VM version info and, when
// UsePerfData is enabled, create the sun.rt.* performance counters and
// publish the JVM capabilities string consumed by monitoring tools.
void RuntimeService::init() {
Abstract_VM_Version::initialize();
if (UsePerfData) {
EXCEPTION_MARK;
_sync_time_ticks =
PerfDataManager::create_counter(SUN_RT, "safepointSyncTime",
PerfData::U_Ticks, CHECK);
_total_safepoints =
PerfDataManager::create_counter(SUN_RT, "safepoints",
PerfData::U_Events, CHECK);
_safepoint_time_ticks =
PerfDataManager::create_counter(SUN_RT, "safepointTime",
PerfData::U_Ticks, CHECK);
_application_time_ticks =
PerfDataManager::create_counter(SUN_RT, "applicationTime",
PerfData::U_Ticks, CHECK);
PerfDataManager::create_constant(SUN_RT, "jvmVersion", PerfData::U_None,
(jlong) Abstract_VM_Version::jvm_version(), CHECK);
_thread_interrupt_signaled_count =
PerfDataManager::create_counter(SUN_RT,
"threadInterruptSignaled", PerfData::U_Events, CHECK);
_interrupted_before_count =
PerfDataManager::create_counter(SUN_RT, "interruptedBeforeIO",
PerfData::U_Events, CHECK);
_interrupted_during_count =
PerfDataManager::create_counter(SUN_RT, "interruptedDuringIO",
PerfData::U_Events, CHECK);
// Capability bit-string: 64 '0'/'1' characters plus a terminating NUL.
// Bit 0: attach-on-demand supported; bit 1: INCLUDE_SERVICES built in.
char capabilities[65];
size_t len = sizeof(capabilities);
memset((void*) capabilities, '0', len);
capabilities[len-1] = '\0';
capabilities[0] = AttachListener::is_attach_supported() ? '1' : '0';
#if INCLUDE_SERVICES
capabilities[1] = '1';
#endif // INCLUDE_SERVICES
PerfDataManager::create_string_constant(SUN_RT, "jvmCapabilities",
capabilities, CHECK);
}
}
// Called when a safepoint begins: fires the dtrace probe, optionally logs
// how long the application ran since the last safepoint, restarts the
// safepoint timer, and updates the safepoint performance counters.
void RuntimeService::record_safepoint_begin() {
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, safepoint__begin);
#else /* USDT2 */
HS_PRIVATE_SAFEPOINT_BEGIN();
#endif /* USDT2 */
// _app_timer is only valid once it has been started (is_updated()).
if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print_cr("Application time: %3.7f seconds",
last_application_time_sec());
}
// Restart safepoint timing; sync time will be filled in later.
_safepoint_timer.update();
_last_safepoint_sync_time_sec = 0.0;
if (UsePerfData) {
_total_safepoints->inc();
if (_app_timer.is_updated()) {
_application_time_ticks->inc(_app_timer.ticks_since_update());
}
}
}
ssssssss82
最新推荐文章于 2024-08-03 21:02:21 发布