// Called once all Java threads have stopped at the safepoint.  Accumulates
// the time spent reaching the synchronized state and, when stop-time logging
// is on, remembers that duration for record_safepoint_end() to print.
void RuntimeService::record_safepoint_synchronized() {
if (UsePerfData) {
// Credit the ticks since the safepoint timer was last updated (i.e. since
// record_safepoint_begin) to the sync-time counter.
_sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
}
if (PrintGCApplicationStoppedTime) {
// Snapshot the elapsed sync time; used in the "Stopping threads took" line.
_last_safepoint_sync_time_sec = last_safepoint_time_sec();
}
}
// Marks the end of a safepoint: fires the DTrace probe, optionally prints the
// total time threads were stopped (plus the time taken to stop them), restarts
// the application timer, and accumulates time spent inside the safepoint.
void RuntimeService::record_safepoint_end() {
#ifndef USDT2
HS_DTRACE_PROBE(hs_private, safepoint__end);
#else /* USDT2 */
HS_PRIVATE_SAFEPOINT_END();
#endif /* USDT2 */
if (PrintGCApplicationStoppedTime) {
// Optional date/time stamps mirror the GC log formatting flags.
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
// last_safepoint_time_sec() is the full stopped duration; the sync portion
// was captured in record_safepoint_synchronized().
gclog_or_tty->print_cr("Total time for which application threads "
"were stopped: %3.7f seconds, "
"Stopping threads took: %3.7f seconds",
last_safepoint_time_sec(),
_last_safepoint_sync_time_sec);
}
// Application time starts accruing again from this point.
_app_timer.update();
if (UsePerfData) {
_safepoint_time_ticks->inc(_safepoint_timer.ticks_since_update());
}
}
// Resets the application timer at application start so subsequent
// application-time measurements begin from now.
void RuntimeService::record_application_start() {
_app_timer.update();
}
// Total milliseconds spent bringing threads to safepoints, or -1 when
// performance data collection is disabled.
jlong RuntimeService::safepoint_sync_time_ms() {
  if (!UsePerfData) {
    return -1;
  }
  return Management::ticks_to_ms(_sync_time_ticks->get_value());
}
// Number of safepoints taken so far, or -1 when perf data is disabled.
jlong RuntimeService::safepoint_count() {
  if (!UsePerfData) {
    return -1;
  }
  return _total_safepoints->get_value();
}
// Total milliseconds spent inside safepoints, or -1 when perf data is disabled.
jlong RuntimeService::safepoint_time_ms() {
  if (!UsePerfData) {
    return -1;
  }
  return Management::ticks_to_ms(_safepoint_time_ticks->get_value());
}
// Total milliseconds of application (non-safepoint) time, or -1 when perf
// data is disabled.
jlong RuntimeService::application_time_ms() {
  if (!UsePerfData) {
    return -1;
  }
  return Management::ticks_to_ms(_application_time_ticks->get_value());
}
// Counts an OS_INTRPT observed before an interruptible operation; only
// meaningful when performance data collection is enabled.
void RuntimeService::record_interrupted_before_count() {
  if (!UsePerfData) return;
  _interrupted_before_count->inc();
}
// Counts an OS_INTRPT observed during an interruptible operation; only
// meaningful when performance data collection is enabled.
void RuntimeService::record_interrupted_during_count() {
  if (!UsePerfData) return;
  _interrupted_during_count->inc();
}
// Counts a thread-interrupt signal delivery; only meaningful when
// performance data collection is enabled.
void RuntimeService::record_thread_interrupt_signaled_count() {
  if (!UsePerfData) return;
  _thread_interrupt_signaled_count->inc();
}
#endif // INCLUDE_MANAGEMENT
C:\hotspot-69087d08d473\src\share\vm/services/runtimeService.hpp
#ifndef SHARE_VM_SERVICES_RUNTIMESERVICE_HPP
#define SHARE_VM_SERVICES_RUNTIMESERVICE_HPP
#include "runtime/perfData.hpp"
#include "runtime/timer.hpp"
// All-static service that records safepoint/application timing and
// interrupt statistics, exposed through the jvmstat performance counters.
class RuntimeService : public AllStatic {
private:
static PerfCounter* _sync_time_ticks; // Accumulated time spent getting to safepoints
static PerfCounter* _total_safepoints;
static PerfCounter* _safepoint_time_ticks; // Accumulated time at safepoints
static PerfCounter* _application_time_ticks; // Accumulated time not at safepoints
static PerfCounter* _thread_interrupt_signaled_count;// os:interrupt thr_kill
static PerfCounter* _interrupted_before_count; // _INTERRUPTIBLE OS_INTRPT
static PerfCounter* _interrupted_during_count; // _INTERRUPTIBLE OS_INTRPT
// Timers are "updated" at phase transitions; elapsed values are read as
// ticks-since-update or seconds.
static TimeStamp _safepoint_timer;
static TimeStamp _app_timer;
// Sync duration of the most recent safepoint, kept for stop-time logging.
static double _last_safepoint_sync_time_sec;
public:
static void init();
// Accessors return -1 when UsePerfData is off (counters not created).
static jlong safepoint_sync_time_ms();
static jlong safepoint_count();
static jlong safepoint_time_ms();
static jlong application_time_ms();
static double last_safepoint_time_sec() { return _safepoint_timer.seconds(); }
static double last_application_time_sec() { return _app_timer.seconds(); }
// Event recorders; compiled to empty stubs when management is excluded.
static void record_safepoint_begin() NOT_MANAGEMENT_RETURN;
static void record_safepoint_synchronized() NOT_MANAGEMENT_RETURN;
static void record_safepoint_end() NOT_MANAGEMENT_RETURN;
static void record_application_start() NOT_MANAGEMENT_RETURN;
static void record_interrupted_before_count() NOT_MANAGEMENT_RETURN;
static void record_interrupted_during_count() NOT_MANAGEMENT_RETURN;
static void record_thread_interrupt_signaled_count() NOT_MANAGEMENT_RETURN;
};
#endif // SHARE_VM_SERVICES_RUNTIMESERVICE_HPP
C:\hotspot-69087d08d473\src\share\vm/services/serviceUtil.hpp
#ifndef SHARE_VM_SERVICES_SERVICEUTIL_HPP
#define SHARE_VM_SERVICES_SERVICEUTIL_HPP
#include "classfile/systemDictionary.hpp"
#include "oops/objArrayOop.hpp"
// Helper predicates shared by the serviceability code.
class ServiceUtil : public AllStatic {
public:
// Returns true if the given oop should be exposed to external observers
// (heap walkers, JVMTI, etc.).  Filters out deleted JNI handles and
// java.lang.Class instances that do not represent a live, walkable klass.
static inline bool visible_oop(oop o) {
// Sentinel oop used for cleared JNI handles: never visible.
if (o == JNIHandles::deleted_handle()) {
return false;
}
if (o->is_instance()) {
// Any ordinary instance (not a java.lang.Class) is visible.
if (o->klass() != SystemDictionary::Class_klass()) {
return true;
}
// Primitive classes (int.class etc.) are visible.
if (java_lang_Class::is_primitive(o)) {
return true;
}
// A java.lang.Class mirror: visible only when its klass is a real
// instance/object-array/type-array klass.
Klass* k = java_lang_Class::as_Klass(o);
if (k->is_klass()) {
if (k->oop_is_instance()) {
return true;
}
if (k->oop_is_objArray()) {
return true;
}
if (k->oop_is_typeArray()) {
return true;
}
}
return false;
}
// Arrays are always visible.
if (o->is_objArray()) {
return true;
}
if (o->is_typeArray()) {
return true;
}
return false;
}; // end of visible_oop()
};
#endif // SHARE_VM_SERVICES_SERVICEUTIL_HPP
C:\hotspot-69087d08d473\src\share\vm/services/threadService.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.hpp"
#include "runtime/vframe.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/threadService.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Feature toggles controlled via the management interface (Management_lock).
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;
// jvmstat counters created in ThreadService::init().
PerfCounter* ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
// Threads that have announced exit but not yet been removed; updated with
// Atomic ops, subtracted from the perf counters by the get_* accessors.
volatile int ThreadService::_exiting_threads_count = 0;
volatile int ThreadService::_exiting_daemon_threads_count = 0;
// Singly-linked list of in-flight thread dump results (GC root tracking).
ThreadDumpResult* ThreadService::_threaddump_list = NULL;
// Default capacity for the various per-dump GrowableArrays.
static const int INITIAL_ARRAY_SIZE = 10;
// One-time initialization: creates the java.threads.* perf counters and
// enables CPU-time / allocated-memory tracking where supported.
// CHECK propagates any exception raised while creating the counters.
void ThreadService::init() {
EXCEPTION_MARK;
_total_threads_count =
PerfDataManager::create_counter(JAVA_THREADS, "started",
PerfData::U_Events, CHECK);
_live_threads_count =
PerfDataManager::create_variable(JAVA_THREADS, "live",
PerfData::U_None, CHECK);
_peak_threads_count =
PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
PerfData::U_None, CHECK);
_daemon_threads_count =
PerfDataManager::create_variable(JAVA_THREADS, "daemon",
PerfData::U_None, CHECK);
// CPU time tracking is optional; depends on OS support.
if (os::is_thread_cpu_time_supported()) {
_thread_cpu_time_enabled = true;
}
_thread_allocated_memory_enabled = true; // Always on, so enable it
}
// Resets the peak live-thread count to the current live count (presumably
// backing ThreadMXBean.resetPeakThreadCount — confirm against caller).
// Threads_lock keeps the live count stable while we copy it.
void ThreadService::reset_peak_thread_count() {
MutexLockerEx mu(Threads_lock);
_peak_threads_count->set_value(get_live_thread_count());
}
// Registers a newly started Java thread in the thread counters.  Threads
// hidden from the external view (including JVMTI agent threads) are skipped.
void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  if (thread->is_hidden_from_external_view() ||
      thread->is_jvmti_agent_thread()) {
    return;
  }
  _total_threads_count->inc();
  _live_threads_count->inc();
  // Maintain the high-water mark of concurrently live threads.
  jlong live_now = _live_threads_count->get_value();
  if (live_now > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(live_now);
  }
  if (daemon) {
    _daemon_threads_count->inc();
  }
}
// Unregisters an exiting Java thread.  The exiting-threads counter is
// decremented unconditionally (it was incremented in current_thread_exiting),
// but the visible-thread counters are only adjusted for non-hidden threads.
void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
Atomic::dec((jint*) &_exiting_threads_count);
if (thread->is_hidden_from_external_view() ||
thread->is_jvmti_agent_thread()) {
return;
}
_live_threads_count->set_value(_live_threads_count->get_value() - 1);
if (daemon) {
_daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1);
Atomic::dec((jint*) &_exiting_daemon_threads_count);
}
}
// Called by a thread on itself when it begins exiting; bumps the exiting
// counters so get_live/daemon_thread_count can exclude in-flight exits.
void ThreadService::current_thread_exiting(JavaThread* jt) {
assert(jt == JavaThread::current(), "Called by current thread");
Atomic::inc((jint*) &_exiting_threads_count);
oop threadObj = jt->threadObj();
if (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)) {
Atomic::inc((jint*) &_exiting_daemon_threads_count);
}
}
// Returns a handle to the object the given thread is currently blocked on:
// the monitor it is waiting on (Object.wait) if any, otherwise the monitor
// it is trying to enter.  Returns a NULL handle when neither applies.
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  assert(Threads_lock->owned_by_self(), "must grab Threads_lock or be at safepoint");
  oop blocked_on = NULL;
  ObjectMonitor* waiting = thread->current_waiting_monitor();
  if (waiting != NULL) {
    blocked_on = (oop) waiting->object();
    assert(blocked_on != NULL, "Object.wait() should have an object");
  } else {
    ObjectMonitor* pending = thread->current_pending_monitor();
    if (pending != NULL) {
      blocked_on = (oop) pending->object();
    }
  }
  return Handle(blocked_on);
}
// Enables/disables monitor-contention monitoring under Management_lock;
// returns the previous setting.
bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);
  const bool previous = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;
  return previous;
}
// Enables/disables per-thread CPU time measurement under Management_lock;
// returns the previous setting.
bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);
  const bool previous = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;
  return previous;
}
// Enables/disables per-thread allocated-memory accounting under
// Management_lock; returns the previous setting.
bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);
  const bool previous = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;
  return previous;
}
// GC support: visit the oops held by every in-flight thread dump result.
void ThreadService::oops_do(OopClosure* f) {
  for (ThreadDumpResult* cur = _threaddump_list; cur != NULL; cur = cur->next()) {
    cur->oops_do(f);
  }
}
void ThreadService::metadata_do(void f(Metadata*)) {
for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
dump->metadata_do(f);
}
}
// Links a new dump result at the head of the global list so its oops are
// reachable by oops_do()/metadata_do() while the dump is alive.
void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
MutexLocker ml(Management_lock);
if (_threaddump_list == NULL) {
_threaddump_list = dump;
} else {
dump->set_next(_threaddump_list);
_threaddump_list = dump;
}
}
// Unlinks a dump result from the global list (called from its destructor).
// The dump must be present; asserts otherwise.
void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  ThreadDumpResult* prior = NULL;
  bool unlinked = false;
  for (ThreadDumpResult* cur = _threaddump_list; cur != NULL; prior = cur, cur = cur->next()) {
    if (cur != dump) {
      continue;
    }
    if (prior == NULL) {
      _threaddump_list = dump->next();   // removing the head
    } else {
      prior->set_next(dump->next());
    }
    unlinked = true;
    break;
  }
  assert(unlinked, "The threaddump result to be removed must exist.");
}
// Dumps the stack traces of the given threads via a VM_ThreadDump safepoint
// operation and converts them to a StackTraceElement[][] Java object.
// Returns a NULL handle (CHECK_NH) on allocation/resolution failure.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
int num_threads,
TRAPS) {
assert(num_threads > 0, "just checking");
ThreadDumpResult dump_result;
// Full-depth dump, without monitor or synchronizer information.
VM_ThreadDump op(&dump_result,
threads,
num_threads,
-1, /* entire stack */
false, /* with locked monitors */
false /* with locked synchronizers */);
VMThread::execute(&op);
ResourceMark rm(THREAD);
Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
ObjArrayKlass* ik = ObjArrayKlass::cast(k);
// Outer array: one StackTraceElement[] slot per requested thread.
objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
objArrayHandle result_obj(THREAD, r);
int num_snapshots = dump_result.num_snapshots();
assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
int i = 0;
for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
ThreadStackTrace* stacktrace = ts->get_stack_trace();
if (stacktrace == NULL) {
// No trace captured for this thread (e.g. it exited): leave a NULL slot.
result_obj->obj_at_put(i, NULL);
} else {
Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
result_obj->obj_at_put(i, backtrace_h());
}
}
return result_obj;
}
// Requests a lazy reset of the thread's contention counters (no-op for
// threads without statistics).
void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stats = thread->get_thread_stat();
  if (stats == NULL) return;
  stats->reset_count_stat();
}
// Requests a lazy reset of the thread's contention timers (no-op for
// threads without statistics).
void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stats = thread->get_thread_stat();
  if (stats == NULL) return;
  stats->reset_time_stat();
}
// Detects deadlock cycles among Java threads.  Must run at a safepoint so
// thread states are stable.  Uses a depth-first walk with per-thread DFS
// numbers: starting from each unvisited thread it follows the
// blocked-on-monitor (and optionally j.u.c. park-blocker) ownership chain;
// revisiting a thread first seen in the current walk means a cycle.
// Returns a linked list of DeadlockCycle objects (NULL if none found).
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(bool concurrent_locks) {
int globalDfn = 0, thisDfn;
ObjectMonitor* waitingToLockMonitor = NULL;
oop waitingToLockBlocker = NULL;
bool blocked_on_monitor = false;
JavaThread *currentThread, *previousThread;
int num_deadlocks = 0;
// Clear DFS numbers; -1 means "not yet visited".
for (JavaThread* p = Threads::first(); p != NULL; p = p->next()) {
p->set_depth_first_number(-1);
}
DeadlockCycle* deadlocks = NULL;
DeadlockCycle* last = NULL;
// 'cycle' is a scratch object, reused per walk; a fresh one is allocated
// whenever the current one is appended to the result list.
DeadlockCycle* cycle = new DeadlockCycle();
for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
if (jt->depth_first_number() >= 0) {
continue;
}
// DFS numbers >= thisDfn belong to the walk started at jt.
thisDfn = globalDfn;
jt->set_depth_first_number(globalDfn++);
previousThread = jt;
currentThread = jt;
cycle->reset();
waitingToLockMonitor = (ObjectMonitor*)jt->current_pending_monitor();
if (concurrent_locks) {
waitingToLockBlocker = jt->current_park_blocker();
}
// Follow the chain: blocked thread -> owner of what it is blocked on.
while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
cycle->add_thread(currentThread);
if (waitingToLockMonitor != NULL) {
address currentOwner = (address)waitingToLockMonitor->owner();
if (currentOwner != NULL) {
currentThread = Threads::owning_thread_from_monitor_owner(
currentOwner,
false /* no locking needed */);
if (currentThread == NULL) {
// Owner is not a JavaThread (e.g. owned via JNI by a thread that
// has since exited, or a raw monitor): treat as a deadlock.
num_deadlocks++;
cycle->set_deadlock(true);
if (deadlocks == NULL) {
deadlocks = cycle;
} else {
last->set_next(cycle);
}
last = cycle;
cycle = new DeadlockCycle();
break;
}
}
} else {
// Blocked on a j.u.c. synchronizer: follow its owner thread, if it is
// an AbstractOwnableSynchronizer exposing one.
if (concurrent_locks) {
if (waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
} else {
currentThread = NULL;
}
}
}
if (currentThread == NULL) {
// Chain ends: no owner, so no cycle from this start.
break;
}
if (currentThread->depth_first_number() < 0) {
// First visit in any walk: number it and continue the chain.
currentThread->set_depth_first_number(globalDfn++);
} else if (currentThread->depth_first_number() < thisDfn) {
// Reached a thread already explored by a previous walk: no new cycle.
break;
} else if (currentThread == previousThread) {
// Self-loop (thread blocked on its own lock): not reported as deadlock.
break;
} else {
// Re-entered a thread numbered in this walk: genuine cycle.
num_deadlocks++;
cycle->set_deadlock(true);
if (deadlocks == NULL) {
deadlocks = cycle;
} else {
last->set_next(cycle);
}
last = cycle;
cycle = new DeadlockCycle();
break;
}
previousThread = currentThread;
waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
if (concurrent_locks) {
waitingToLockBlocker = currentThread->current_park_blocker();
}
}
}
// The last scratch cycle was never appended; free it.
delete cycle;
return deadlocks;
}
// Unbounded dump result (snapshot count not known in advance); registers
// itself so its oops are visited by GC while the dump is in progress.
ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {
ThreadService::add_thread_dump(this);
}
// Dump result for a known number of threads; registers itself for GC
// visiting like the default constructor.
ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _next(NULL), _last(NULL) {
ThreadService::add_thread_dump(this);
}
// Unregisters this result from the global list and frees all owned
// thread snapshots.
ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);
  ThreadSnapshot* cur = _snapshots;
  while (cur != NULL) {
    ThreadSnapshot* doomed = cur;
    cur = cur->next();
    delete doomed;
  }
}
// Appends a snapshot to the result list (takes ownership).  When a thread
// count was given at construction, the list must not exceed it.
void ThreadDumpResult::add_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;       // first snapshot becomes the list head
  } else {
    _last->set_next(ts);   // otherwise link after the current tail
  }
  _last = ts;
}
// GC support: visit the oops held by each snapshot in this result.
void ThreadDumpResult::oops_do(OopClosure* f) {
  for (ThreadSnapshot* cur = _snapshots; cur != NULL; cur = cur->next()) {
    cur->oops_do(f);
  }
}
void ThreadDumpResult::metadata_do(void f(Metadata*)) {
for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
ts->metadata_do(f);
}
}
// Captures one Java frame: method, bci, the class-loader holder keeping the
// method's class alive, and (optionally) the monitors locked in this frame.
StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
_method = jvf->method();
_bci = jvf->bci();
// Keeps the method's holder klass reachable while this frame info lives.
_class_holder = _method->method_holder()->klass_holder();
_locked_monitors = NULL;
if (with_lock_info) {
ResourceMark rm;
GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
int length = list->length();
if (length > 0) {
// C-heap allocated: outlives the ResourceMark above.
_locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true);
for (int i = 0; i < length; i++) {
MonitorInfo* monitor = list->at(i);
assert(monitor->owner(), "This monitor must have an owning object");
_locked_monitors->append(monitor->owner());
}
}
}
}
// GC support: visit the locked-monitor oops (if any) and the class holder.
void StackFrameInfo::oops_do(OopClosure* f) {
  if (_locked_monitors != NULL) {
    int count = _locked_monitors->length();
    for (int idx = 0; idx < count; idx++) {
      f->do_oop((oop*) _locked_monitors->adr_at(idx));
    }
  }
  f->do_oop(&_class_holder);
}
// Visit the Method* captured by this frame so it is kept alive.
void StackFrameInfo::metadata_do(void f(Metadata*)) {
f(_method);
}
// Prints the frame in stack-trace form followed by one
// "\t- locked <addr> (a ClassName)" line per monitor held in this frame.
void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int count = (_locked_monitors != NULL) ? _locked_monitors->length() : 0;
  for (int idx = 0; idx < count; idx++) {
    oop mon = _locked_monitors->at(idx);
    InstanceKlass* holder = InstanceKlass::cast(mon->klass());
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", (address)mon, holder->external_name());
  }
}
// Closure over inflated monitors: collects those owned by a given thread
// that do not appear in any of its stack frames (i.e. JNI-locked monitors)
// into the supplied ThreadStackTrace.
class InflatedMonitorsClosure: public MonitorClosure {
private:
ThreadStackTrace* _stack_trace;
Thread* _thread;
public:
InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
_thread = t;
_stack_trace = st;
}
// Called for each inflated monitor in the VM.
void do_monitor(ObjectMonitor* mid) {
if (mid->owner() == _thread) {
oop object = (oop) mid->object();
// Monitors already attributed to a stack frame are skipped.
if (!_stack_trace->is_owned_monitor_on_stack(object)) {
_stack_trace->add_jni_locked_monitor(object);
}
}
}
};
// Creates an empty stack trace for the given thread.  Both arrays are
// C-heap allocated (C_HEAP) so they are independent of any ResourceMark;
// the JNI-locked-monitor list exists only when monitor info was requested.
ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  _frames = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
  _jni_locked_monitors = _with_locked_monitors
      ? new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true)
      : NULL;
}
// Frees the owned frame infos, the frame array, and (if present) the
// JNI-locked-monitor array.
ThreadStackTrace::~ThreadStackTrace() {
  int frame_count = _frames->length();
  for (int idx = 0; idx < frame_count; idx++) {
    delete _frames->at(idx);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    delete _jni_locked_monitors;
  }
}
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (_thread->has_last_Java_frame()) {
RegisterMap reg_map(_thread);
vframe* start_vf = _thread->last_java_vframe(®_map);
int count = 0;
for (vframe* f = start_vf; f; f = f->sender() ) {
if (maxDepth >= 0 && count == maxDepth) {
break;
}
if (f->is_java_frame()) {
javaVFrame* jvf = javaVFrame::cast(f);
add_stack_frame(jvf);
count++;
} else {
}
}
}
if (_with_locked_monitors) {
InflatedMonitorsClosure imc(_thread, this);
ObjectSynchronizer::monitors_iterate(&imc);
}
}
// Returns true if 'object' appears in any frame's locked-monitor list.
// (The original broke only out of the inner loop on a match and kept
// scanning the remaining frames; returning immediately is equivalent
// and avoids the wasted scan.)
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<oop>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j);
      assert(monitor != NULL && monitor->is_instance(), "must be a Java object");
      if (monitor == object) {
        return true;
      }
    }
  }
  return false;
}
// Converts the captured frames into a java.lang.StackTraceElement[] object.
// Returns a NULL handle (CHECK_NH) if allocation fails.
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
instanceKlassHandle ik(THREAD, k);
// One array slot per captured frame.
objArrayOop ste = oopFactory::new_objArray(ik(), _depth, CHECK_NH);
objArrayHandle backtrace(THREAD, ste);
for (int j = 0; j < _depth; j++) {
StackFrameInfo* frame = _frames->at(j);
methodHandle mh(THREAD, frame->method());
oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
backtrace->obj_at_put(j, element);
}
return backtrace;
}
// Records one Java frame (with its locked monitors when requested) and
// bumps the captured depth.
void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* info = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(info);
  _depth++;
}
// GC support: visit the oops held by every captured frame, then the
// JNI-locked monitor oops when that list was collected.
void ThreadStackTrace::oops_do(OopClosure* f) {
  int frame_count = _frames->length();
  for (int i = 0; i < frame_count; i++) {
    _frames->at(i)->oops_do(f);
  }
  int monitor_count = (_jni_locked_monitors != NULL) ? _jni_locked_monitors->length() : 0;
  for (int j = 0; j < monitor_count; j++) {
    f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
  }
}
// Visit the metadata (Method*) referenced by every captured frame.
void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int frame_count = _frames->length();
  for (int idx = 0; idx < frame_count; idx++) {
    _frames->at(idx)->metadata_do(f);
  }
}
// Frees the per-thread lock map unless the caller asked to retain it
// past this dump's lifetime.
ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }
  ThreadConcurrentLocks* node = _map;
  while (node != NULL) {
    ThreadConcurrentLocks* doomed = node;
    node = node->next();
    delete doomed;
  }
}
// At a safepoint, finds every AbstractOwnableSynchronizer instance in the
// heap and groups them by owning thread.  j.u.c. only exists on JDK >= 1.6.
void ConcurrentLocksDump::dump_at_safepoint() {
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (JDK_Version::is_gte_jdk16x_version()) {
ResourceMark rm;
GrowableArray<oop>* aos_objects = new GrowableArray<oop>(INITIAL_ARRAY_SIZE);
HeapInspection::find_instances_at_safepoint(SystemDictionary::abstract_ownable_synchronizer_klass(),
aos_objects);
build_map(aos_objects);
}
}
// Groups each AbstractOwnableSynchronizer instance under the thread that
// currently owns it; unowned synchronizers are not recorded.
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int count = aos_objects->length();
  for (int idx = 0; idx < count; idx++) {
    oop sync = aos_objects->at(idx);
    oop owner_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(sync);
    if (owner_obj == NULL) {
      continue;  // unowned: skip
    }
    JavaThread* owner = java_lang_Thread::thread(owner_obj);
    assert(sync->is_instance(), "Must be an instanceOop");
    add_lock(owner, (instanceOop) sync);
  }
}
// Records that 'thread' owns synchronizer 'o', reusing the thread's entry
// when one exists, otherwise appending a new entry to the map list.
void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* entry = thread_concurrent_locks(thread);
  if (entry != NULL) {
    entry->add_lock(o);
    return;
  }
  entry = new ThreadConcurrentLocks(thread);
  entry->add_lock(o);
  if (_map == NULL) {
    _map = entry;            // first entry becomes the list head
  } else {
    _last->set_next(entry);  // otherwise link after the tail
  }
  _last = entry;
}
// Linear lookup of the entry for 'thread'; NULL when no locks were recorded.
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* node = _map; node != NULL; node = node->next()) {
    if (node->java_thread() == thread) {
      return node;
    }
  }
  return NULL;
}
// Prints the "Locked ownable synchronizers:" section for one thread:
// either "- None" or one "- <addr> (a ClassName)" line per owned lock.
void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr(" Locked ownable synchronizers:");
  ThreadConcurrentLocks* entry = thread_concurrent_locks(t);
  GrowableArray<instanceOop>* locks = (entry != NULL) ? entry->owned_locks() : NULL;
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }
  for (int idx = 0; idx < locks->length(); idx++) {
    instanceOop lock = locks->at(idx);
    InstanceKlass* holder = InstanceKlass::cast(lock->klass());
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", (address)lock, holder->external_name());
  }
  st->cr();
}
// Per-thread record of owned j.u.c. synchronizers; the lock array is
// C-heap allocated so it is independent of any ResourceMark.
ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
_thread = thread;
_owned_locks = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
_next = NULL;
}
// Frees the owned-locks array (the oops themselves are heap objects).
ThreadConcurrentLocks::~ThreadConcurrentLocks() {
delete _owned_locks;
}
// Records one more synchronizer owned by this thread.
void ThreadConcurrentLocks::add_lock(instanceOop o) {
_owned_locks->append(o);
}
// GC support: visit each recorded synchronizer oop in place.
void ThreadConcurrentLocks::oops_do(OopClosure* f) {
  int count = _owned_locks->length();
  for (int idx = 0; idx < count; idx++) {
    f->do_oop((oop*) _owned_locks->adr_at(idx));
  }
}
// Zero-initializes the counters, reset flags, and the per-slot recursion
// counters (the elapsedTimer members default-construct themselves).
ThreadStatistics::ThreadStatistics() {
_contended_enter_count = 0;
_monitor_wait_count = 0;
_sleep_count = 0;
_count_pending_reset = false;
_timer_pending_reset = false;
memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}
// Captures a point-in-time snapshot of one thread: its statistics, status,
// and (for blocked/waiting/parked threads) the object it is blocked on and
// that object's owner.  Status is downgraded to RUNNABLE in corner cases
// where the blocking information is inconsistent or unavailable.
ThreadSnapshot::ThreadSnapshot(JavaThread* thread) {
_thread = thread;
_threadObj = thread->threadObj();
_stack_trace = NULL;
_concurrent_locks = NULL;
_next = NULL;
// Copy the contention/wait/sleep statistics at snapshot time.
ThreadStatistics* stat = thread->get_thread_stat();
_contended_enter_ticks = stat->contended_enter_ticks();
_contended_enter_count = stat->contended_enter_count();
_monitor_wait_ticks = stat->monitor_wait_ticks();
_monitor_wait_count = stat->monitor_wait_count();
_sleep_ticks = stat->sleep_ticks();
_sleep_count = stat->sleep_count();
_blocker_object = NULL;
_blocker_object_owner = NULL;
_thread_status = java_lang_Thread::get_thread_status(_threadObj);
_is_ext_suspended = thread->is_being_ext_suspended();
_is_in_native = (thread->thread_state() == _thread_in_native);
// For monitor-blocked or Object.wait states, record the contended monitor.
if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
_thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
_thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {
Handle obj = ThreadService::get_current_contended_monitor(thread);
if (obj() == NULL) {
// The thread changed state before we could sample the monitor.
_thread_status = java_lang_Thread::RUNNABLE;
} else {
_blocker_object = obj();
JavaThread* owner = ObjectSynchronizer::get_lock_owner(obj, false);
if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
|| (owner != NULL && owner->is_attaching_via_jni())) {
// Ownerless "blocked" or owner still attaching via JNI: treat the
// thread as runnable and drop the blocker info.
_thread_status = java_lang_Thread::RUNNABLE;
_blocker_object = NULL;
} else if (owner != NULL) {
_blocker_object_owner = owner->threadObj();
}
}
}
// For parked threads, record the j.u.c. park blocker and, when it is an
// AbstractOwnableSynchronizer, its owner thread.
if (JDK_Version::current().supports_thread_park_blocker() &&
(_thread_status == java_lang_Thread::PARKED ||
_thread_status == java_lang_Thread::PARKED_TIMED)) {
_blocker_object = thread->current_park_blocker();
if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
_blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
}
}
}
// Frees the owned stack trace and concurrent-locks records (delete on
// NULL is a no-op).
ThreadSnapshot::~ThreadSnapshot() {
delete _stack_trace;
delete _concurrent_locks;
}
// Captures the thread's stack (and optionally its locked monitors) into a
// newly allocated ThreadStackTrace owned by this snapshot.
void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
_stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
_stack_trace->dump_stack_at_safepoint(max_depth);
}
// GC support: visit the snapshot's direct oop fields, then delegate to the
// optional stack-trace and concurrent-locks records.
void ThreadSnapshot::oops_do(OopClosure* f) {
f->do_oop(&_threadObj);
f->do_oop(&_blocker_object);
f->do_oop(&_blocker_object_owner);
if (_stack_trace != NULL) {
_stack_trace->oops_do(f);
}
if (_concurrent_locks != NULL) {
_concurrent_locks->oops_do(f);
}
}
// Visit the metadata referenced by the captured stack trace, if any.
void ThreadSnapshot::metadata_do(void f(Metadata*)) {
if (_stack_trace != NULL) {
_stack_trace->metadata_do(f);
}
}
// Empty cycle; threads are appended as the deadlock walk progresses.
// The thread array is C-heap allocated (independent of any ResourceMark).
DeadlockCycle::DeadlockCycle() {
_is_deadlock = false;
_threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
_next = NULL;
}
// Frees the thread array; the JavaThreads themselves are not owned.
DeadlockCycle::~DeadlockCycle() {
delete _threads;
}
// Prints one deadlock cycle: for each thread, what it is waiting for
// (monitor or j.u.c. synchronizer) and which thread holds it, followed by
// the stack traces of all threads in the cycle.
void DeadlockCycle::print_on(outputStream* st) const {
st->cr();
st->print_cr("Found one Java-level deadlock:");
st->print("=============================");
JavaThread* currentThread;
ObjectMonitor* waitingToLockMonitor;
oop waitingToLockBlocker;
int len = _threads->length();
for (int i = 0; i < len; i++) {
currentThread = _threads->at(i);
waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
waitingToLockBlocker = currentThread->current_park_blocker();
st->cr();
st->print_cr("\"%s\":", currentThread->get_thread_name());
const char* owner_desc = ",\n which is held by";
if (waitingToLockMonitor != NULL) {
st->print(" waiting to lock monitor " INTPTR_FORMAT, waitingToLockMonitor);
oop obj = (oop)waitingToLockMonitor->object();
if (obj != NULL) {
st->print(" (object " INTPTR_FORMAT ", a %s)", (address)obj,
(InstanceKlass::cast(obj->klass()))->external_name());
if (!currentThread->current_pending_monitor_is_from_java()) {
// Monitor was entered via JNI MonitorEnter rather than bytecode.
owner_desc = "\n in JNI, which is held by";
}
} else {
// A monitor with no associated Java object is a JVMTI raw monitor.
owner_desc = " (JVMTI raw monitor),\n which is held by";
}
// Resolve the owner; it may not be a JavaThread (e.g. JNI attacher).
currentThread = Threads::owning_thread_from_monitor_owner(
(address)waitingToLockMonitor->owner(),
false /* no locking needed */);
if (currentThread == NULL) {
// Owner is unknown; print its raw address and move to the next entry.
st->print("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
(address)waitingToLockMonitor->owner());
continue;
}
} else {
// Blocked on a j.u.c. synchronizer rather than a monitor.
st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
(address)waitingToLockBlocker,
(InstanceKlass::cast(waitingToLockBlocker->klass()))->external_name());
assert(waitingToLockBlocker->is_a(SystemDictionary::abstract_ownable_synchronizer_klass()),
"Must be an AbstractOwnableSynchronizer");
oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
currentThread = java_lang_Thread::thread(ownerObj);
}
st->print("%s \"%s\"", owner_desc, currentThread->get_thread_name());
}
st->cr();
st->cr();
// Temporarily force monitor info into the stack traces printed below.
bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
JavaMonitorsInStackTrace = true;
st->print_cr("Java stack information for the threads listed above:");
st->print_cr("===================================================");
for (int j = 0; j < len; j++) {
currentThread = _threads->at(j);
st->print_cr("\"%s\":", currentThread->get_thread_name());
currentThread->print_stack_on(st);
}
JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}
// Builds a handle array of the live, externally visible Java threads.
// JVMTI agent threads and JNI-attaching threads are included only when the
// corresponding flag is set; hidden, exiting, and not-yet-started threads
// are always skipped.
ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
bool include_jvmti_agent_threads,
bool include_jni_attaching_threads) {
assert(cur_thread == Thread::current(), "Check current thread");
int init_size = ThreadService::get_live_thread_count();
_threads_array = new GrowableArray<instanceHandle>(init_size);
// Threads_lock keeps the thread list stable during the walk.
MutexLockerEx ml(Threads_lock);
for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
// Skip threads with no java.lang.Thread object, dying or dead threads,
// and threads hidden from the external view.
if (jt->threadObj() == NULL ||
jt->is_exiting() ||
!java_lang_Thread::is_alive(jt->threadObj()) ||
jt->is_hidden_from_external_view()) {
continue;
}
if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
continue;
}
if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
continue;
}
instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
_threads_array->append(h);
}
}
C:\hotspot-69087d08d473\src\share\vm/services/threadService.hpp
#ifndef SHARE_VM_SERVICES_THREADSERVICE_HPP
#define SHARE_VM_SERVICES_THREADSERVICE_HPP
#include "classfile/javaClasses.hpp"
#include "runtime/handles.hpp"
#include "runtime/init.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/perfData.hpp"
#include "services/management.hpp"
#include "services/serviceUtil.hpp"
class OopClosure;
class ThreadDumpResult;
class ThreadStackTrace;
class ThreadSnapshot;
class StackFrameInfo;
class ThreadConcurrentLocks;
class DeadlockCycle;
// All-static service backing the thread-management interfaces: thread
// counters (total/live/peak/daemon), monitoring feature toggles, thread
// dumps, and deadlock detection.
class ThreadService : public AllStatic {
private:
// jvmstat counters, created in init().
static PerfCounter* _total_threads_count;
static PerfVariable* _live_threads_count;
static PerfVariable* _peak_threads_count;
static PerfVariable* _daemon_threads_count;
// Threads that have announced exit but are not yet removed; subtracted
// from the perf counters by the get_* accessors below.
static volatile int _exiting_threads_count;
static volatile int _exiting_daemon_threads_count;
// Feature toggles, guarded by Management_lock in their setters.
static bool _thread_monitoring_contention_enabled;
static bool _thread_cpu_time_enabled;
static bool _thread_allocated_memory_enabled;
// In-flight thread dump results whose oops must be visited by GC.
static ThreadDumpResult* _threaddump_list;
public:
static void init();
static void add_thread(JavaThread* thread, bool daemon);
static void remove_thread(JavaThread* thread, bool daemon);
static void current_thread_exiting(JavaThread* jt);
// Setters return the previous value of the flag.
static bool set_thread_monitoring_contention(bool flag);
static bool is_thread_monitoring_contention() { return _thread_monitoring_contention_enabled; }
static bool set_thread_cpu_time_enabled(bool flag);
static bool is_thread_cpu_time_enabled() { return _thread_cpu_time_enabled; }
static bool set_thread_allocated_memory_enabled(bool flag);
// BUG FIX: this previously returned _thread_cpu_time_enabled (copy-paste
// error); the allocated-memory flag was set but never read.
static bool is_thread_allocated_memory_enabled() { return _thread_allocated_memory_enabled; }
static jlong get_total_thread_count() { return _total_threads_count->get_value(); }
static jlong get_peak_thread_count() { return _peak_threads_count->get_value(); }
// Live/daemon counts exclude threads that are in the process of exiting.
static jlong get_live_thread_count() { return _live_threads_count->get_value() - _exiting_threads_count; }
static jlong get_daemon_thread_count() { return _daemon_threads_count->get_value() - _exiting_daemon_threads_count; }
static int exiting_threads_count() { return _exiting_threads_count; }
static int exiting_daemon_threads_count() { return _exiting_daemon_threads_count; }
static void add_thread_dump(ThreadDumpResult* dump);
static void remove_thread_dump(ThreadDumpResult* dump);
static Handle get_current_contended_monitor(JavaThread* thread);
static Handle dump_stack_traces(GrowableArray<instanceHandle>* threads,
int num_threads, TRAPS);
static void reset_peak_thread_count();
static void reset_contention_count_stat(JavaThread* thread);
static void reset_contention_time_stat(JavaThread* thread);
static DeadlockCycle* find_deadlocks_at_safepoint(bool object_monitors_only);
// GC support for the oops/metadata held by in-flight dump results.
static void oops_do(OopClosure* f);
static void metadata_do(void f(Metadata*));
};
// Per-thread contention/wait/sleep statistics.  Resets are lazy: a reset
// request only sets a pending flag; the counters/timers are actually
// cleared the next time a new event begins, and the accessors report zero
// while a reset is pending.
class ThreadStatistics : public CHeapObj<mtInternal> {
private:
jlong _contended_enter_count;
elapsedTimer _contended_enter_timer;
jlong _monitor_wait_count;
elapsedTimer _monitor_wait_timer;
jlong _sleep_count;
elapsedTimer _sleep_timer;
// Lazy-reset flags; see class comment.
bool _count_pending_reset;
bool _timer_pending_reset;
// Per-slot recursion counts and timers used by the perf-data machinery.
int _perf_recursion_counts[6];
elapsedTimer _perf_timers[6];
// Applies a pending counter reset, if any.
void check_and_reset_count() {
if (!_count_pending_reset) return;
_contended_enter_count = 0;
_monitor_wait_count = 0;
_sleep_count = 0;
_count_pending_reset = false; // was "= 0"; use a bool literal for a bool field
}
// Applies a pending timer reset, if any.
void check_and_reset_timer() {
if (!_timer_pending_reset) return;
_contended_enter_timer.reset();
_monitor_wait_timer.reset();
_sleep_timer.reset();
_timer_pending_reset = false; // was "= 0"; use a bool literal for a bool field
}
public:
ThreadStatistics();
// Accessors report zero while a reset is pending but not yet applied.
jlong contended_enter_count() { return (_count_pending_reset ? 0 : _contended_enter_count); }
jlong contended_enter_ticks() { return (_timer_pending_reset ? 0 : _contended_enter_timer.active_ticks()); }
jlong monitor_wait_count() { return (_count_pending_reset ? 0 : _monitor_wait_count); }
jlong monitor_wait_ticks() { return (_timer_pending_reset ? 0 : _monitor_wait_timer.active_ticks()); }
jlong sleep_count() { return (_count_pending_reset ? 0 : _sleep_count); }
jlong sleep_ticks() { return (_timer_pending_reset ? 0 : _sleep_timer.active_ticks()); }
// Event recorders: apply any pending reset, then record the event.
void monitor_wait() { check_and_reset_count(); _monitor_wait_count++; }
void monitor_wait_begin() { check_and_reset_timer(); _monitor_wait_timer.start(); }
void monitor_wait_end() { _monitor_wait_timer.stop(); check_and_reset_timer(); }
void thread_sleep() { check_and_reset_count(); _sleep_count++; }
void thread_sleep_begin() { check_and_reset_timer(); _sleep_timer.start(); }
void thread_sleep_end() { _sleep_timer.stop(); check_and_reset_timer(); }
void contended_enter() { check_and_reset_count(); _contended_enter_count++; }
void contended_enter_begin() { check_and_reset_timer(); _contended_enter_timer.start(); }
void contended_enter_end() { _contended_enter_timer.stop(); check_and_reset_timer(); }
// Request lazy resets (applied on the next recorded event).
void reset_count_stat() { _count_pending_reset = true; }
void reset_time_stat() { _timer_pending_reset = true; }
int* perf_recursion_counts_addr() { return _perf_recursion_counts; }
elapsedTimer* perf_timers_addr() { return _perf_timers; }
};
// Point-in-time snapshot of one JavaThread, taken at a safepoint for thread
// dumps / JMM ThreadInfo. Snapshots form a singly linked list owned by a
// ThreadDumpResult (see set_next()/next()).
class ThreadSnapshot : public CHeapObj<mtInternal> {
private:
  JavaThread* _thread;                            // sampled thread
  oop         _threadObj;                         // java.lang.Thread mirror
  java_lang_Thread::ThreadStatus _thread_status;  // status at snapshot time

  bool    _is_ext_suspended;
  bool    _is_in_native;

  // Statistics copied from the thread's ThreadStatistics at snapshot time.
  jlong   _contended_enter_ticks;
  jlong   _contended_enter_count;
  jlong   _monitor_wait_ticks;
  jlong   _monitor_wait_count;
  jlong   _sleep_ticks;
  jlong   _sleep_count;

  oop     _blocker_object;        // object the thread is blocked/waiting on, if any
  oop     _blocker_object_owner;  // owner of that object's monitor, if known

  ThreadStackTrace*      _stack_trace;       // optional captured stack
  ThreadConcurrentLocks* _concurrent_locks;  // optional j.u.c. locks owned
  ThreadSnapshot*        _next;              // next snapshot in the dump list

public:
  // Dummy snapshot (e.g. placeholder for a thread that no longer exists).
  ThreadSnapshot() : _thread(NULL), _threadObj(NULL), _stack_trace(NULL), _concurrent_locks(NULL), _next(NULL),
                     _blocker_object(NULL), _blocker_object_owner(NULL) {};
  ThreadSnapshot(JavaThread* thread);
  ~ThreadSnapshot();

  java_lang_Thread::ThreadStatus thread_status() { return _thread_status; }
  oop threadObj() const           { return _threadObj; }
  void set_next(ThreadSnapshot* n) { _next = n; }

  bool is_ext_suspended()         { return _is_ext_suspended; }
  bool is_in_native()             { return _is_in_native; }

  jlong contended_enter_count()   { return _contended_enter_count; }
  jlong contended_enter_ticks()   { return _contended_enter_ticks; }
  jlong monitor_wait_count()      { return _monitor_wait_count; }
  jlong monitor_wait_ticks()      { return _monitor_wait_ticks; }
  jlong sleep_count()             { return _sleep_count; }
  jlong sleep_ticks()             { return _sleep_ticks; }

  oop blocker_object()            { return _blocker_object; }
  oop blocker_object_owner()      { return _blocker_object_owner; }

  ThreadSnapshot* next() const    { return _next; }
  ThreadStackTrace* get_stack_trace() { return _stack_trace; }
  ThreadConcurrentLocks* get_concurrent_locks() { return _concurrent_locks; }

  // Capture this thread's stack (must run at a safepoint).
  void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors);
  void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; }

  // GC support: visit/patch embedded oops and metadata.
  void oops_do(OopClosure* f);
  void metadata_do(void f(Metadata*));
};
// Captured Java stack trace for one thread, optionally including the object
// monitors locked by each frame and monitors locked via JNI.
class ThreadStackTrace : public CHeapObj<mtInternal> {
 private:
  JavaThread* _thread;
  int         _depth;                 // number of stack frames added
  bool        _with_locked_monitors;  // also record per-frame locked monitors
  GrowableArray<StackFrameInfo*>* _frames;
  GrowableArray<oop>* _jni_locked_monitors;  // monitors locked through JNI

 public:
  ThreadStackTrace(JavaThread* thread, bool with_locked_monitors);
  ~ThreadStackTrace();

  JavaThread* thread()                 { return _thread; }
  StackFrameInfo* stack_frame_at(int i) { return _frames->at(i); }
  int get_stack_depth()                { return _depth; }

  void add_stack_frame(javaVFrame* jvf);
  // Walk the thread's vframes; must run at a safepoint.
  void dump_stack_at_safepoint(int max_depth);
  // Build a java.lang.StackTraceElement[] from the captured frames.
  Handle allocate_fill_stack_trace_element_array(TRAPS);

  // GC support.
  void oops_do(OopClosure* f);
  void metadata_do(void f(Metadata*));

  GrowableArray<oop>* jni_locked_monitors() { return _jni_locked_monitors; }
  int num_jni_locked_monitors() { return (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); }

  // True if 'object' is a monitor locked by any captured frame.
  bool is_owned_monitor_on_stack(oop object);
  void add_jni_locked_monitor(oop object) { _jni_locked_monitors->append(object); }
};
// One frame of a captured stack trace: method + bci, plus (optionally) the
// monitors this frame locked and the holder class keeping the method alive.
class StackFrameInfo : public CHeapObj<mtInternal> {
 private:
  Method*             _method;
  int                 _bci;
  GrowableArray<oop>* _locked_monitors; // list of object monitors locked by this frame
  oop                 _class_holder;    // mirror keeping _method's class from being unloaded

 public:
  StackFrameInfo(javaVFrame* jvf, bool with_locked_monitors);
  ~StackFrameInfo() {
    if (_locked_monitors != NULL) {
      delete _locked_monitors;
    }
  };

  Method* method() const { return _method; }
  int bci()        const { return _bci; }

  // GC support.
  void oops_do(OopClosure* f);
  void metadata_do(void f(Metadata*));

  int num_locked_monitors() { return (_locked_monitors != NULL ? _locked_monitors->length() : 0); }
  GrowableArray<oop>* locked_monitors() { return _locked_monitors; }

  void print_on(outputStream* st) const;
};
// The java.util.concurrent locks (AbstractOwnableSynchronizer instances)
// owned by one thread; chained into a list by ConcurrentLocksDump.
class ThreadConcurrentLocks : public CHeapObj<mtInternal> {
 private:
  GrowableArray<instanceOop>* _owned_locks;
  ThreadConcurrentLocks*      _next;    // next entry in the dump's map
  JavaThread*                 _thread;  // owning thread

 public:
  ThreadConcurrentLocks(JavaThread* thread);
  ~ThreadConcurrentLocks();

  void add_lock(instanceOop o);
  void set_next(ThreadConcurrentLocks* n) { _next = n; }
  ThreadConcurrentLocks* next()           { return _next; }
  JavaThread* java_thread()               { return _thread; }
  GrowableArray<instanceOop>* owned_locks() { return _owned_locks; }

  // GC support.
  void oops_do(OopClosure* f);
};
// Stack-allocated map from JavaThread to its owned j.u.c. locks, built at a
// safepoint from all live AbstractOwnableSynchronizer objects.
class ConcurrentLocksDump : public StackObj {
 private:
  ThreadConcurrentLocks* _map;   // head of per-thread entries
  ThreadConcurrentLocks* _last;  // Last ThreadConcurrentLocks in the map
  bool _retain_map_on_free;      // if true, destructor leaves entries alive

  void build_map(GrowableArray<oop>* aos_objects);
  void add_lock(JavaThread* thread, instanceOop o);

 public:
  ConcurrentLocksDump(bool retain_map_on_free) : _map(NULL), _last(NULL), _retain_map_on_free(retain_map_on_free) {};
  ConcurrentLocksDump() : _map(NULL), _last(NULL), _retain_map_on_free(false) {};
  ~ConcurrentLocksDump();

  // Populate the map; must run at a safepoint.
  void dump_at_safepoint();
  ThreadConcurrentLocks* thread_concurrent_locks(JavaThread* thread);
  void print_locks_on(JavaThread* t, outputStream* st);
};
// Stack-allocated container for the ThreadSnapshots produced by a thread
// dump. Registered with ThreadService (add/remove_thread_dump) so its oops
// stay visible to GC while the dump is alive.
class ThreadDumpResult : public StackObj {
 private:
  int              _num_threads;    // requested thread count (0 = all)
  int              _num_snapshots;  // snapshots actually collected
  ThreadSnapshot*  _snapshots;      // head of snapshot list
  ThreadSnapshot*  _last;           // tail, for O(1) append
  ThreadDumpResult* _next;          // link in ThreadService's active-dump list

 public:
  ThreadDumpResult();
  ThreadDumpResult(int num_threads);
  ~ThreadDumpResult();

  void add_thread_snapshot(ThreadSnapshot* ts);
  void set_next(ThreadDumpResult* next) { _next = next; }
  ThreadDumpResult* next()              { return _next; }
  int num_threads()                     { return _num_threads; }
  int num_snapshots()                   { return _num_snapshots; }
  ThreadSnapshot* snapshots()           { return _snapshots; }

  // GC support.
  void oops_do(OopClosure* f);
  void metadata_do(void f(Metadata*));
};
// One cycle of threads found by deadlock detection; cycles are chained via
// set_next()/next(). _is_deadlock marks whether the cycle was confirmed as a
// deadlock by the detector.
class DeadlockCycle : public CHeapObj<mtInternal> {
 private:
  bool _is_deadlock;
  GrowableArray<JavaThread*>* _threads;  // threads participating in the cycle
  DeadlockCycle* _next;                  // next detected cycle

 public:
  DeadlockCycle();
  ~DeadlockCycle();

  DeadlockCycle* next()            { return _next; }
  void set_next(DeadlockCycle* d)  { _next = d; }
  void add_thread(JavaThread* t)   { _threads->append(t); }
  // Reuse this object for another detection pass.
  void reset()                     { _is_deadlock = false; _threads->clear(); }
  void set_deadlock(bool value)    { _is_deadlock = value; }
  bool is_deadlock()               { return _is_deadlock; }
  int num_threads()                { return _threads->length(); }
  GrowableArray<JavaThread*>* threads() { return _threads; }
  void print_on(outputStream* st) const;
};
// Stack-allocated snapshot of the live thread list as java.lang.Thread
// handles; the constructor flags control inclusion of JVMTI agent threads
// and threads still attaching via JNI.
class ThreadsListEnumerator : public StackObj {
private:
  GrowableArray<instanceHandle>* _threads_array;
public:
  ThreadsListEnumerator(Thread* cur_thread,
                        bool include_jvmti_agent_threads = false,
                        bool include_jni_attaching_threads = true);
  int num_threads()                      { return _threads_array->length(); }
  instanceHandle get_threadObj(int index) { return _threads_array->at(index); }
};
// RAII helper: records a thread's java.lang.Thread status on construction,
// optionally sets a new status, and restores the saved status on
// destruction. All updates are skipped when the thread has no Java mirror
// yet / anymore (see is_alive()).
class JavaThreadStatusChanger : public StackObj {
 private:
  java_lang_Thread::ThreadStatus _old_state;  // status to restore in the destructor
  JavaThread* _java_thread;
  bool _is_alive;                             // cached liveness at construction time

  // Capture the thread and its current status (only if it is alive).
  void save_old_state(JavaThread* java_thread) {
    _java_thread = java_thread;
    _is_alive = is_alive(java_thread);
    if (is_alive()) {
      _old_state = java_lang_Thread::get_thread_status(_java_thread->threadObj());
    }
  }

 public:
  static void set_thread_status(JavaThread* java_thread,
                                java_lang_Thread::ThreadStatus state) {
    java_lang_Thread::set_thread_status(java_thread->threadObj(), state);
  }

  // Set status on the tracked thread; no-op if it is not alive.
  void set_thread_status(java_lang_Thread::ThreadStatus state) {
    if (is_alive()) {
      set_thread_status(_java_thread, state);
    }
  }

  JavaThreadStatusChanger(JavaThread* java_thread,
                          java_lang_Thread::ThreadStatus state) : _old_state(java_lang_Thread::NEW) {
    save_old_state(java_thread);
    set_thread_status(state);
  }

  // Save-only form: status is changed later by the subclass, if at all.
  JavaThreadStatusChanger(JavaThread* java_thread) : _old_state(java_lang_Thread::NEW) {
    save_old_state(java_thread);
  }

  ~JavaThreadStatusChanger() {
    set_thread_status(_old_state);
  }

  // A thread is "alive" for status purposes once it has a Java mirror.
  static bool is_alive(JavaThread* java_thread) {
    return java_thread != NULL && java_thread->threadObj() != NULL;
  }

  bool is_alive() {
    return _is_alive;
  }
};
// RAII scope for Object.wait(): flips the thread status to
// IN_OBJECT_WAIT(_TIMED) and records wait statistics. The wait *count* is
// always bumped; the wait *timer* only runs while contention monitoring is
// enabled at scope entry (_active).
class JavaThreadInObjectWaitState : public JavaThreadStatusChanger {
 private:
  ThreadStatistics* _stat;
  bool _active;  // timer started (contention monitoring was on)

 public:
  JavaThreadInObjectWaitState(JavaThread *java_thread, bool timed) :
    JavaThreadStatusChanger(java_thread,
                            timed ? java_lang_Thread::IN_OBJECT_WAIT_TIMED : java_lang_Thread::IN_OBJECT_WAIT) {
    if (is_alive()) {
      _stat = java_thread->get_thread_stat();
      _active = ThreadService::is_thread_monitoring_contention();
      _stat->monitor_wait();
      if (_active) {
        _stat->monitor_wait_begin();
      }
    } else {
      _active = false;
    }
  }

  ~JavaThreadInObjectWaitState() {
    if (_active) {
      _stat->monitor_wait_end();
    }
  }
};
// RAII scope for LockSupport.park(): flips the thread status to
// PARKED(_TIMED). Note: park events are accounted in the *monitor wait*
// counters of ThreadStatistics (same statistics slot as Object.wait()).
class JavaThreadParkedState : public JavaThreadStatusChanger {
 private:
  ThreadStatistics* _stat;
  bool _active;  // timer started (contention monitoring was on)

 public:
  JavaThreadParkedState(JavaThread *java_thread, bool timed) :
    JavaThreadStatusChanger(java_thread,
                            timed ? java_lang_Thread::PARKED_TIMED : java_lang_Thread::PARKED) {
    if (is_alive()) {
      _stat = java_thread->get_thread_stat();
      _active = ThreadService::is_thread_monitoring_contention();
      _stat->monitor_wait();
      if (_active) {
        _stat->monitor_wait_begin();
      }
    } else {
      _active = false;
    }
  }

  ~JavaThreadParkedState() {
    if (_active) {
      _stat->monitor_wait_end();
    }
  }
};
// RAII scope for blocking on a monitor enter: flips the thread status to
// BLOCKED_ON_MONITOR_ENTER and records contended-enter statistics while
// contention monitoring is enabled. Also provides the static begin/end pair
// used when a thread re-acquires a monitor after Object.wait().
class JavaThreadBlockedOnMonitorEnterState : public JavaThreadStatusChanger {
 private:
  ThreadStatistics* _stat;
  bool _active;  // timer started (contention monitoring was on)

  // Bump the contended-enter count, and start the timer when monitoring is
  // enabled. Returns whether the timer was started.
  static bool contended_enter_begin(JavaThread *java_thread) {
    set_thread_status(java_thread, java_lang_Thread::BLOCKED_ON_MONITOR_ENTER);
    ThreadStatistics* stat = java_thread->get_thread_stat();
    stat->contended_enter();
    bool active = ThreadService::is_thread_monitoring_contention();
    if (active) {
      stat->contended_enter_begin();
    }
    return active;
  }

 public:
  // Re-acquiring the monitor after Object.wait(); returns whether the
  // contended-enter timer was started (pass to wait_reenter_end).
  static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) {
    assert((java_thread != NULL), "Java thread should not be null here");
    bool active = false;
    if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) {
      active = contended_enter_begin(java_thread);
    }
    return active;
  }

  static void wait_reenter_end(JavaThread *java_thread, bool active) {
    if (active) {
      java_thread->get_thread_stat()->contended_enter_end();
    }
    set_thread_status(java_thread, java_lang_Thread::RUNNABLE);
  }

  // Fix: the base-class initializer is now listed first so the written order
  // matches the actual initialization order (base before members; avoids
  // -Wreorder). The redundant '_active = false;' assignment in the body was
  // dropped: the initializer list already sets it.
  JavaThreadBlockedOnMonitorEnterState(JavaThread *java_thread, ObjectMonitor *obj_m) :
    JavaThreadStatusChanger(java_thread), _stat(NULL), _active(false) {
    assert((java_thread != NULL), "Java thread should not be null here");
    // Only record contention when the monitor's object is visible to
    // management and the monitor is actually contended.
    if (is_alive() && ServiceUtil::visible_oop((oop)obj_m->object()) && obj_m->contentions() > 0) {
      _stat = java_thread->get_thread_stat();
      _active = contended_enter_begin(java_thread);
    }
  }

  ~JavaThreadBlockedOnMonitorEnterState() {
    if (_active) {
      _stat->contended_enter_end();
    }
  }
};
// RAII scope for Thread.sleep(): flips the thread status to SLEEPING and
// records sleep statistics. The sleep count is always bumped; the timer only
// runs while contention monitoring is enabled at scope entry (_active).
class JavaThreadSleepState : public JavaThreadStatusChanger {
 private:
  ThreadStatistics* _stat;
  bool _active;  // timer started (contention monitoring was on)

 public:
  JavaThreadSleepState(JavaThread *java_thread) :
    JavaThreadStatusChanger(java_thread, java_lang_Thread::SLEEPING) {
    if (is_alive()) {
      _stat = java_thread->get_thread_stat();
      _active = ThreadService::is_thread_monitoring_contention();
      _stat->thread_sleep();
      if (_active) {
        _stat->thread_sleep_begin();
      }
    } else {
      _active = false;
    }
  }

  ~JavaThreadSleepState() {
    if (_active) {
      _stat->thread_sleep_end();
    }
  }
};
#endif // SHARE_VM_SERVICES_THREADSERVICE_HPP
C:\hotspot-69087d08d473\src\share\vm/services/virtualMemoryTracker.cpp
#include "precompiled.hpp"
#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
// Construct the global VirtualMemorySnapshot inside the statically reserved
// raw buffer (_snapshot) via placement new, so summary tracking needs no
// dynamic allocation during early VM startup.
void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Placement new into the size_t-aligned backing array.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
// Ordering functor for the sorted committed-region list; delegates to the
// region's own overlap/adjacency-aware comparison.
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  const int order = r1.compare(r2);
  return order;
}
// Ordering functor for the sorted reserved-region list; delegates to the
// region's overlap-aware comparison (overlapping regions compare equal).
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  const int order = r1.compare(r2);
  return order;
}
// Record that [addr, addr+size) within this reservation has been committed,
// merging with an existing overlapping or adjacent committed region when
// possible, and updating the global committed-memory summary accordingly.
// Returns true on success or when the range is already fully accounted for.
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");
  // An all-committed reservation tracks no individual sub-regions.
  if (all_committed()) return true;
  CommittedMemoryRegion committed_rgn(addr, size, stack);
  // find_node matches any region that overlaps or is adjacent (their
  // compare() returns 0 for such regions).
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      // Exact region already recorded; nothing to do.
      return true;
    }
    if (rgn->adjacent_to(addr, size)) {
      LinkedListNode<CommittedMemoryRegion>* next =
        node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        // Already covered by the following region; refresh its call stack
        // when the match is exact.
        if (next->data()->same_region(addr, size)) {
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        // Same allocation site: grow the existing region in place. The
        // summary is updated by un-recording the old size and re-recording
        // the expanded size.
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      // Different call stack: keep it as a distinct region, inserted in
      // base-address order around the found node.
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    // Overlapping (not adjacent, not exact): must already be covered.
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // No overlapping or adjacent region exists: add a fresh one.
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}
// Mark the entire reservation as committed (or not). When switching to
// all-committed, the whole reservation size is recorded in the summary.
void ReservedMemoryRegion::set_all_committed(bool b) {
  // No state change -> nothing to account for.
  if (all_committed() == b) return;
  _all_committed = b;
  if (b) {
    VirtualMemorySummary::record_committed_memory(size(), flag());
  }
}
// Remove [addr, addr+size) from the committed region held by 'node'. The
// range is strictly inside the region (asserted), so the region is either
// shrunk at one end or split into two. Summary accounting is done by the
// caller. Fix: the trailing 'return false;' after the if/else was
// unreachable (both branches return) and has been removed.
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");
  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    // Range touches one end of the region: simply shrink it in place.
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // Range is in the middle: keep the low part in 'rgn' and add a new
    // region for the high part, which sorts right after 'node'.
    address top = rgn->end();
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);
    address high_base = addr + size;
    size_t high_size = top - high_base;
    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }
}
// Record that [addr, addr+sz) within this reservation has been uncommitted,
// trimming/removing/splitting the tracked committed regions and updating the
// global summary. Fix: removed an unused local ('tmp', aliasing 'head') in
// the swallowed-region branch.
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // Thread stacks are only uncommitted as a whole; partial uncommits inside
  // a stack region are ignored.
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");
  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    // Leave the all-committed state and account for the uncommitted bytes.
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      // Entire reservation uncommitted: nothing left to track.
      return true;
    } else {
      // Materialize what remains committed as explicit region(s).
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        // Uncommit at either end: one remainder region.
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // Uncommit in the middle: a low and a high remainder.
        address top = rgn.end();
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          address high_base = addr + sz;
          size_t high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::empty_stack());
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // Walk the sorted committed list, shaving off every overlap until the
    // requested range is fully accounted for.
    LinkedListNode<CommittedMemoryRegion>* head =
      _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    VirtualMemoryRegion uncommitted_rgn(addr, sz);
    while (head != NULL && !uncommitted_rgn.is_empty()) {
      CommittedMemoryRegion* crgn = head->data();
      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Exact match: remove the node and we are done.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          _committed_regions.remove_after(prev);
          return true;
        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Strictly inside one committed region: shrink or split it.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
          // Committed region is swallowed whole: drop it and keep walking.
          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
          continue;
        } else if (crgn->contain_address(uncommitted_rgn.base())) {
          // Overlap at the tail of the committed region: trim both.
          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        } else if (uncommitted_rgn.contain_address(crgn->base())) {
          // Overlap at the head of the committed region: trim both.
          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
          crgn->exclude_region(crgn->base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
            toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        }
      }
      prev = head;
      head = head->next();
    }
  }
  return true;
}
// Transfer to 'rgn' every committed region whose base is at or above 'addr'.
// Used when a reservation is split: the high half takes the committed
// regions that now fall inside it. NOTE(review): when the split point is not
// the list head, this detaches the suffix by clearing prev's next pointer;
// the suffix nodes then become rgn's list head.
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");
  // Find the first committed region at or above the split address.
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }
  if (head != NULL) {
    if (prev != NULL) {
      // Detach the suffix [head, ...] from this region's list.
      prev->set_next(head->next());
    } else {
      // Whole list moves.
      _committed_regions.set_head(NULL);
    }
  }
  // Hand the suffix (possibly NULL) to the new region.
  rgn._committed_regions.set_head(head);
}
// Total committed bytes inside this reservation: the full size when marked
// all-committed, otherwise the sum of the tracked committed sub-regions.
size_t ReservedMemoryRegion::committed_size() const {
  if (all_committed()) {
    return size();
  }
  size_t total = 0;
  for (LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
       node != NULL; node = node->next()) {
    total += node->data()->size();
  }
  return total;
}
// Assign a memory type to this reservation. Only mtNone -> f transitions are
// legal (asserted); the summary's reserved and committed byte counts are
// re-bucketed under the new type.
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() == f) return;  // already tagged; nothing to move
  VirtualMemorySummary::move_reserved_memory(flag(), f, size());
  VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
  _flag = f;
}
// Early NMT setup: the summary snapshot buffer is needed for summary and
// detail tracking alike.
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level < NMT_summary) {
    // Nothing to set up below summary level.
    return true;
  }
  VirtualMemorySummary::initialize();
  return true;
}
// Deferred NMT setup: allocate the sorted reserved-region list once the C
// heap is usable. Returns false only if that allocation fails.
bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level < NMT_summary) {
    return true;  // no region bookkeeping below summary level
  }
  _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
    SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
  return (_reserved_regions != NULL);
}
// Record a virtual memory reservation [base_addr, base_addr+size). Handles
// re-reservation of the same range, expansion of an adjacent range, and two
// special overlap cases (stale thread-stack regions and CDS mappings).
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  // find() matches any existing region that overlaps this one.
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    // Brand-new reservation.
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      // Same range reserved again: refresh attribution only.
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      // Contiguous reservation: grow the existing region.
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      if (reserved_rgn->flag() == mtThreadStack) {
        // Overlap with a stale thread-stack region: a JNI-attached thread
        // exited without detaching. Swap the summary accounting from the
        // old stack region to the new reservation.
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
        return true;
      }
      if (reserved_rgn->flag() == mtClassShared) {
        // CDS file mappings land inside the already-reserved CDS region.
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }
      // Any other partial overlap is a tracking bug.
      ShouldNotReachHere();
      return false;
    }
  }
}
// Tag the reserved region containing 'addr' with a memory type. Only the
// mtNone -> flag transition is legal (asserted in set_flag / here).
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");
  // Probe with a one-byte region: the sorted list's comparator treats any
  // overlapping region as equal, so this finds the region containing addr.
  ReservedMemoryRegion key(addr, 1);
  ReservedMemoryRegion* found = _reserved_regions->find(key);
  if (found == NULL) return;
  assert(found->contain_address(addr), "Containment");
  if (found->flag() != flag) {
    assert(found->flag() == mtNone, "Overwrite memory type");
    found->set_flag(flag);
  }
}
// Record a commit of [addr, addr+size): locate the reserved region that
// covers the range and delegate the bookkeeping to it.
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion key(addr, size);
  ReservedMemoryRegion* owner = _reserved_regions->find(key);
  assert(owner != NULL, "No reserved region");
  assert(owner->contain_region(addr, size), "Not completely contained");
  return owner->add_committed_region(addr, size, stack);
}
// Record an uncommit of [addr, addr+size): locate the covering reserved
// region and delegate the bookkeeping to it.
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion key(addr, size);
  ReservedMemoryRegion* owner = _reserved_regions->find(key);
  assert(owner != NULL, "No reserved region");
  assert(owner->contain_region(addr, size), "Not completely contained");
  return owner->remove_uncommitted_region(addr, size);
}
// Record a release of [addr, addr+size): first retire any committed
// sub-regions inside the range, then shrink, remove, or split the reserved
// region. Fix: the 'high_rgn' constructor call was truncated (it was missing
// the call-stack and flag arguments and the closing parenthesis, so the file
// could not compile); the split-off high region now inherits the original
// reservation's call stack and memory type.
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  // Uncommit anything still committed inside the released range.
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }
  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
  if (reserved_rgn->same_region(addr, size)) {
    // Whole reservation released: drop it from the list.
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      // Released at either end: shrink in place.
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      // Released in the middle: keep the low part in place and add a new
      // region for the high part, carrying over stack and type.
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        // Committed regions above the split point now belong to high_rgn.
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
// Visit every tracked reserved region with 'walker'. Returns false as soon
// as the walker aborts the traversal.
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;  // hold the lock so the list cannot mutate under us
  if (_reserved_regions != NULL) {
    for (LinkedListNode<ReservedMemoryRegion>* node = _reserved_regions->head();
         node != NULL; node = node->next()) {
      if (!walker->do_allocation_site(node->peek())) {
        return false;  // walker requested early termination
      }
    }
  }
  return true;
}
// Change the NMT tracking level. Only the downgrade to NMT_minimal needs
// work here: the per-region bookkeeping is discarded.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to != NMT_minimal) {
    return true;  // nothing to tear down
  }
  assert(from == NMT_summary || from == NMT_detail, "Just check");
  ThreadCritical tc;
  if (_reserved_regions != NULL) {
    delete _reserved_regions;
    _reserved_regions = NULL;
  }
  return true;
}
C:\hotspot-69087d08d473\src\share\vm/services/virtualMemoryTracker.hpp
#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"
// Reserved/committed byte counters for one memory type. Committed bytes are
// a subset of reserved bytes (asserted on commit).
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t     _reserved;   // total reserved bytes
  size_t     _committed;  // total committed bytes (<= _reserved)

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }
  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }
  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }
  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};
// Virtual memory counters attributed to one native call site (for NMT
// detail reporting); thin forwarding wrappers around the embedded
// VirtualMemory data.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) :
    AllocationSite<VirtualMemory>(stack, flag) { }

  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
  inline size_t reserved()  const { return peek()->reserved();  }
  inline size_t committed() const { return peek()->committed(); }
};
class VirtualMemorySummary;
// Per-memory-type virtual memory counters, indexable by MEMFLAGS or by raw
// index; also provides totals and copying for report snapshots.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory  _virtual_memory[mt_number_of_types];  // one slot per memory type

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_virtual_memory[index];
  }

  // Sum of reserved bytes across all memory types.
  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  // Sum of committed bytes across all memory types.
  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
// Static facade over the global VirtualMemorySnapshot: records reserve /
// commit / uncommit / release events per memory type and re-buckets counts
// when a region's type changes. The snapshot lives in a raw size_t buffer
// and is placement-constructed by initialize() (see virtualMemoryTracker.cpp).
class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move 'size' reserved bytes from type 'from' to type 'to'.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  // Move 'size' committed bytes from type 'from' to type 'to'.
  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  // Copy the live counters into caller-provided storage.
  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  // Raw backing storage for the placement-constructed snapshot.
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
// A contiguous [base, base+size) address range with containment, overlap,
// adjacency, shrink (exclude) and grow (expand) operations. Base class of
// CommittedMemoryRegion and ReservedMemoryRegion.
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address      _base_address;
  size_t       _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
     assert(addr != NULL, "Invalid address");
     assert(size > 0, "Invalid size");
   }

  inline address base() const { return _base_address;   }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size;           }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  // Inclusive containment of [addr, addr+size).
  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  // True if [addr, addr+sz) abuts this region at either end.
  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  // Shrink by removing [addr, addr+sz); only allowed at either end of the
  // region (asserted) — callers split the region for middle removals.
  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Not containment");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;
    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  // Grow by an adjacent range [addr, addr+sz) (asserted adjacent).
  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t  size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};
// A committed sub-region within a reservation, attributed to the native call
// stack that committed it. compare() returns 0 (equal) for overlapping OR
// adjacent regions so that sorted-list lookups find merge candidates.
class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack  _stack;  // call site that committed this region

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  // Sort by base address; overlapping/adjacent regions compare equal.
  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to   (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  // Equality in list terms: overlapping, adjacent, or same base.
  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const         { return &_stack; }
};
typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
// A reserved virtual-memory range tracked by NMT: the reserving call stack,
// the memory type flag, and the list of committed sub-regions (unless the
// whole reservation is committed, in which case _all_committed is set and
// the list is not used).
class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
                   _committed_regions;   // committed sub-ranges, sorted by base

  NativeCallStack  _stack;               // call stack at reservation time
  MEMFLAGS         _flag;                // memory type this region is tagged with

  bool             _all_committed;       // entire reservation committed as one

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }

  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor.  BUG FIX: previously only base/size were copied,
  // leaving _stack/_flag/_all_committed default-initialized and silently
  // dropping the committed-region list.  Delegate to operator= so the
  // copy carries the full state.
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const         { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  // Ordering for the sorted reserved-region list: overlapping regions
  // compare equal, otherwise order by base address.
  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  // Record/remove committed sub-ranges within this reservation.
  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Transfer committed regions that fall inside rgn from this region to rgn.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack         = *other.call_stack();
    _flag          = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      // Deep-copy the committed-region list.
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // Remove [addr, addr + sz) starting the search at the given list node.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    // NOTE(review): this asserts the reserved region's size, not rgn's —
    // looks intentional (reservation must exist) but worth confirming.
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
// Visitor interface for walking all tracked reserved regions.
// Subclasses override do_allocation_site(); returning false stops the walk.
class VirtualMemoryWalker : public StackObj {
 public:
   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
// Static facade for Native Memory Tracking of virtual memory: records
// reservations, commits, uncommits and releases in a sorted region list.
class VirtualMemoryTracker : AllStatic {
 public:
  // Two-phase startup: initialize() early, late_initialize() once the
  // region list can be allocated.
  static bool initialize(NMT_TrackingLevel level);

  static bool late_initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk all reserved regions; stops early if the walker returns false.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  // Change the tracking level (e.g. detail -> summary) in place.
  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/llvmHeaders.hpp
#ifndef SHARE_VM_SHARK_LLVMHEADERS_HPP
#define SHARE_VM_SHARK_LLVMHEADERS_HPP

// LLVM's headers define their own assert/DEBUG; stash HotSpot's versions
// around the includes and restore them afterwards.
#ifdef assert
  #undef assert
#endif

#ifdef DEBUG
  #define SHARK_DEBUG
  #undef DEBUG
#endif

#include <llvm/Analysis/Verifier.h>
#include <llvm/ExecutionEngine/ExecutionEngine.h>

// Header locations moved between LLVM releases; pick the right set.
#if SHARK_LLVM_VERSION <= 31
#include <llvm/Support/IRBuilder.h>
#include <llvm/Type.h>
#include <llvm/Argument.h>
#include <llvm/Constants.h>
#include <llvm/DerivedTypes.h>
#include <llvm/Instructions.h>
#include <llvm/LLVMContext.h>
#include <llvm/Module.h>
#elif SHARK_LLVM_VERSION <= 32
#include <llvm/IRBuilder.h>
#include <llvm/Type.h>
#include <llvm/Argument.h>
#include <llvm/Constants.h>
#include <llvm/DerivedTypes.h>
#include <llvm/Instructions.h>
#include <llvm/LLVMContext.h>
#include <llvm/Module.h>
#else // SHARK_LLVM_VERSION <= 34
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/Argument.h>
#include <llvm/IR/Constants.h>
#include <llvm/IR/DerivedTypes.h>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/IR/Instructions.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Module.h>
#include <llvm/ADT/StringRef.h>
#include <llvm/IR/Type.h>
#endif

#include <llvm/Support/Threading.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/ExecutionEngine/JITMemoryManager.h>
#include <llvm/Support/CommandLine.h>
#include <llvm/ExecutionEngine/MCJIT.h>
#include <llvm/ExecutionEngine/JIT.h>
#include <llvm/ADT/StringMap.h>
#include <llvm/Support/Debug.h>
#include <llvm/Support/Host.h>

#include <map>

#ifdef assert
  #undef assert
#endif

// Re-create HotSpot's assert macro (normally from utilities/debug.hpp),
// since the LLVM headers may have clobbered it.
#ifdef ASSERT
#ifndef USE_REPEATED_ASSERTS
#define assert(p, msg)                                                       \
do {                                                                         \
  if (!(p)) {                                                                \
    report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg);       \
    BREAKPOINT;                                                              \
  }                                                                          \
} while (0)
#else // #ifndef USE_REPEATED_ASSERTS
// BUG FIX: the "#define assert(p, msg)" line was missing its trailing
// backslash, so the do/while body below was emitted as real code instead
// of being part of the macro, breaking this configuration entirely.
#define assert(p, msg)                                                       \
do {                                                                         \
  for (int __i = 0; __i < AssertRepeat; __i++) {                             \
    if (!(p)) {                                                              \
      report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg);     \
      BREAKPOINT;                                                            \
    }                                                                        \
  }                                                                          \
} while (0)
#endif // #ifndef USE_REPEATED_ASSERTS
#else
#define assert(p, msg)
#endif

// Restore HotSpot's DEBUG symbol if it was defined before the includes.
#ifdef DEBUG
  #undef DEBUG
#endif
#ifdef SHARK_DEBUG
  #define DEBUG
  #undef SHARK_DEBUG
#endif

#endif // SHARE_VM_SHARK_LLVMHEADERS_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/llvmValue.hpp
#ifndef SHARE_VM_SHARK_LLVMVALUE_HPP
#define SHARE_VM_SHARK_LLVMVALUE_HPP
#include "shark/llvmHeaders.hpp"
#include "shark/sharkContext.hpp"
#include "shark/sharkType.hpp"
// Static factory helpers that wrap Java primitive values and null
// references as LLVM constants of the matching Shark type.
class LLVMValue : public AllStatic {
 public:
  // Signed integer constants for the Java integral types.
  static llvm::ConstantInt* jbyte_constant(jbyte value) {
    return llvm::ConstantInt::get(SharkType::jbyte_type(), value, true);
  }
  static llvm::ConstantInt* jint_constant(jint value) {
    return llvm::ConstantInt::get(SharkType::jint_type(), value, true);
  }
  static llvm::ConstantInt* jlong_constant(jlong value) {
    return llvm::ConstantInt::get(SharkType::jlong_type(), value, true);
  }

  // Floating-point constants.
  static llvm::ConstantFP* jfloat_constant(jfloat value) {
    return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
  }
  static llvm::ConstantFP* jdouble_constant(jdouble value) {
    return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
  }

  // Null constants for object and klass pointers.
  static llvm::ConstantPointerNull* null() {
    return llvm::ConstantPointerNull::get(SharkType::oop_type());
  }
  static llvm::ConstantPointerNull* nullKlass() {
    return llvm::ConstantPointerNull::get(SharkType::klass_type());
  }

 public:
  // Unsigned machine-level constants (single bit / pointer width).
  static llvm::ConstantInt* bit_constant(int value) {
    return llvm::ConstantInt::get(SharkType::bit_type(), value, false);
  }
  static llvm::ConstantInt* intptr_constant(intptr_t value) {
    return llvm::ConstantInt::get(SharkType::intptr_type(), value, false);
  }
};
C:\hotspot-69087d08d473\src\share\vm/shark/sharkBlock.cpp
#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkBlock.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkConstant.hpp"
#include "shark/sharkState.hpp"
#include "shark/sharkValue.hpp"
#include "shark/shark_globals.hpp"
#include "utilities/debug.hpp"
using namespace llvm;
// Translate the bytecodes in [start, limit) into LLVM IR, dispatching each
// opcode to the matching do_*() emitter or inline stack/IR manipulation.
// Returns early if a trap has been recorded at the current bci.
void SharkBlock::parse_bytecode(int start, int limit) {
  SharkValue *a, *b, *c, *d;   // scratch operands for stack-shuffle opcodes
  int i;

  // Ensure the current state exists before we start pushing/popping.
  current_state();

  iter()->reset_to_bci(start);
  while (iter()->next_bci() < limit) {
    NOT_PRODUCT(a = b = c = d = NULL);
    iter()->next();

    if (SharkTraceBytecodes)
      tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));

    // A recorded trap at this bci replaces the rest of the block.
    if (has_trap() && trap_bci() == bci()) {
      do_trap(trap_request());
      return;
    }

    // Insert a safepoint poll on any backward branch (dest <= current bci).
    if (UseLoopSafepoints) {
      switch (bc()) {
      case Bytecodes::_goto:
      case Bytecodes::_ifnull:
      case Bytecodes::_ifnonnull:
      case Bytecodes::_if_acmpeq:
      case Bytecodes::_if_acmpne:
      case Bytecodes::_ifeq:
      case Bytecodes::_ifne:
      case Bytecodes::_iflt:
      case Bytecodes::_ifle:
      case Bytecodes::_ifgt:
      case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq:
      case Bytecodes::_if_icmpne:
      case Bytecodes::_if_icmplt:
      case Bytecodes::_if_icmple:
      case Bytecodes::_if_icmpgt:
      case Bytecodes::_if_icmpge:
        if (iter()->get_dest() <= bci())
          maybe_add_backedge_safepoint();
        break;

      case Bytecodes::_goto_w:
        if (iter()->get_far_dest() <= bci())
          maybe_add_backedge_safepoint();
        break;

      case Bytecodes::_tableswitch:
      case Bytecodes::_lookupswitch:
        // One poll suffices: stop at the first backward destination.
        if (switch_default_dest() <= bci()) {
          maybe_add_backedge_safepoint();
          break;
        }
        int len = switch_table_length();
        for (int i = 0; i < len; i++) {
          if (switch_dest(i) <= bci()) {
            maybe_add_backedge_safepoint();
            break;
          }
        }
        break;
      }
    }

    // Main bytecode dispatch.
    switch (bc()) {
    case Bytecodes::_nop:
      break;

    // --- Constants ---
    case Bytecodes::_aconst_null:
      push(SharkValue::null());
      break;

    case Bytecodes::_iconst_m1:
      push(SharkValue::jint_constant(-1));
      break;
    case Bytecodes::_iconst_0:
      push(SharkValue::jint_constant(0));
      break;
    case Bytecodes::_iconst_1:
      push(SharkValue::jint_constant(1));
      break;
    case Bytecodes::_iconst_2:
      push(SharkValue::jint_constant(2));
      break;
    case Bytecodes::_iconst_3:
      push(SharkValue::jint_constant(3));
      break;
    case Bytecodes::_iconst_4:
      push(SharkValue::jint_constant(4));
      break;
    case Bytecodes::_iconst_5:
      push(SharkValue::jint_constant(5));
      break;

    case Bytecodes::_lconst_0:
      push(SharkValue::jlong_constant(0));
      break;
    case Bytecodes::_lconst_1:
      push(SharkValue::jlong_constant(1));
      break;

    case Bytecodes::_fconst_0:
      push(SharkValue::jfloat_constant(0));
      break;
    case Bytecodes::_fconst_1:
      push(SharkValue::jfloat_constant(1));
      break;
    case Bytecodes::_fconst_2:
      push(SharkValue::jfloat_constant(2));
      break;

    case Bytecodes::_dconst_0:
      push(SharkValue::jdouble_constant(0));
      break;
    case Bytecodes::_dconst_1:
      push(SharkValue::jdouble_constant(1));
      break;

    case Bytecodes::_bipush:
      push(SharkValue::jint_constant(iter()->get_constant_u1()));
      break;
    case Bytecodes::_sipush:
      push(SharkValue::jint_constant(iter()->get_constant_u2()));
      break;

    case Bytecodes::_ldc:
    case Bytecodes::_ldc_w:
    case Bytecodes::_ldc2_w: {
      SharkConstant* constant = SharkConstant::for_ldc(iter());
      assert(constant->is_loaded(), "trap should handle unloaded classes");
      push(constant->value(builder()));
      break;
    }

    // --- Local variable loads ---
    case Bytecodes::_iload_0:
    case Bytecodes::_lload_0:
    case Bytecodes::_fload_0:
    case Bytecodes::_dload_0:
    case Bytecodes::_aload_0:
      push(local(0));
      break;
    case Bytecodes::_iload_1:
    case Bytecodes::_lload_1:
    case Bytecodes::_fload_1:
    case Bytecodes::_dload_1:
    case Bytecodes::_aload_1:
      push(local(1));
      break;
    case Bytecodes::_iload_2:
    case Bytecodes::_lload_2:
    case Bytecodes::_fload_2:
    case Bytecodes::_dload_2:
    case Bytecodes::_aload_2:
      push(local(2));
      break;
    case Bytecodes::_iload_3:
    case Bytecodes::_lload_3:
    case Bytecodes::_fload_3:
    case Bytecodes::_dload_3:
    case Bytecodes::_aload_3:
      push(local(3));
      break;
    case Bytecodes::_iload:
    case Bytecodes::_lload:
    case Bytecodes::_fload:
    case Bytecodes::_dload:
    case Bytecodes::_aload:
      push(local(iter()->get_index()));
      break;

    // --- Array loads ---
    case Bytecodes::_baload:
      do_aload(T_BYTE);
      break;
    case Bytecodes::_caload:
      do_aload(T_CHAR);
      break;
    case Bytecodes::_saload:
      do_aload(T_SHORT);
      break;
    case Bytecodes::_iaload:
      do_aload(T_INT);
      break;
    case Bytecodes::_laload:
      do_aload(T_LONG);
      break;
    case Bytecodes::_faload:
      do_aload(T_FLOAT);
      break;
    case Bytecodes::_daload:
      do_aload(T_DOUBLE);
      break;
    case Bytecodes::_aaload:
      do_aload(T_OBJECT);
      break;

    // --- Local variable stores ---
    case Bytecodes::_istore_0:
    case Bytecodes::_lstore_0:
    case Bytecodes::_fstore_0:
    case Bytecodes::_dstore_0:
    case Bytecodes::_astore_0:
      set_local(0, pop());
      break;
    case Bytecodes::_istore_1:
    case Bytecodes::_lstore_1:
    case Bytecodes::_fstore_1:
    case Bytecodes::_dstore_1:
    case Bytecodes::_astore_1:
      set_local(1, pop());
      break;
    case Bytecodes::_istore_2:
    case Bytecodes::_lstore_2:
    case Bytecodes::_fstore_2:
    case Bytecodes::_dstore_2:
    case Bytecodes::_astore_2:
      set_local(2, pop());
      break;
    case Bytecodes::_istore_3:
    case Bytecodes::_lstore_3:
    case Bytecodes::_fstore_3:
    case Bytecodes::_dstore_3:
    case Bytecodes::_astore_3:
      set_local(3, pop());
      break;
    case Bytecodes::_istore:
    case Bytecodes::_lstore:
    case Bytecodes::_fstore:
    case Bytecodes::_dstore:
    case Bytecodes::_astore:
      set_local(iter()->get_index(), pop());
      break;

    // --- Array stores ---
    case Bytecodes::_bastore:
      do_astore(T_BYTE);
      break;
    case Bytecodes::_castore:
      do_astore(T_CHAR);
      break;
    case Bytecodes::_sastore:
      do_astore(T_SHORT);
      break;
    case Bytecodes::_iastore:
      do_astore(T_INT);
      break;
    case Bytecodes::_lastore:
      do_astore(T_LONG);
      break;
    case Bytecodes::_fastore:
      do_astore(T_FLOAT);
      break;
    case Bytecodes::_dastore:
      do_astore(T_DOUBLE);
      break;
    case Bytecodes::_aastore:
      do_astore(T_OBJECT);
      break;

    // --- Stack manipulation (xpop/xpush work on one-word stack slots) ---
    case Bytecodes::_pop:
      xpop();
      break;
    case Bytecodes::_pop2:
      xpop();
      xpop();
      break;
    case Bytecodes::_swap:
      a = xpop();
      b = xpop();
      xpush(a);
      xpush(b);
      break;
    case Bytecodes::_dup:
      a = xpop();
      xpush(a);
      xpush(a);
      break;
    case Bytecodes::_dup_x1:
      a = xpop();
      b = xpop();
      xpush(a);
      xpush(b);
      xpush(a);
      break;
    case Bytecodes::_dup_x2:
      a = xpop();
      b = xpop();
      c = xpop();
      xpush(a);
      xpush(c);
      xpush(b);
      xpush(a);
      break;
    case Bytecodes::_dup2:
      a = xpop();
      b = xpop();
      xpush(b);
      xpush(a);
      xpush(b);
      xpush(a);
      break;
    case Bytecodes::_dup2_x1:
      a = xpop();
      b = xpop();
      c = xpop();
      xpush(b);
      xpush(a);
      xpush(c);
      xpush(b);
      xpush(a);
      break;
    case Bytecodes::_dup2_x2:
      a = xpop();
      b = xpop();
      c = xpop();
      d = xpop();
      xpush(b);
      xpush(a);
      xpush(d);
      xpush(c);
      xpush(b);
      xpush(a);
      break;

    // --- Object / field access ---
    case Bytecodes::_arraylength:
      do_arraylength();
      break;

    case Bytecodes::_getfield:
      do_getfield();
      break;
    case Bytecodes::_getstatic:
      do_getstatic();
      break;
    case Bytecodes::_putfield:
      do_putfield();
      break;
    case Bytecodes::_putstatic:
      do_putstatic();
      break;

    // --- Integer arithmetic ---
    case Bytecodes::_iadd:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateAdd(a->jint_value(), b->jint_value()), false));
      break;
    case Bytecodes::_isub:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateSub(a->jint_value(), b->jint_value()), false));
      break;
    case Bytecodes::_imul:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateMul(a->jint_value(), b->jint_value()), false));
      break;
    case Bytecodes::_idiv:
      do_idiv();
      break;
    case Bytecodes::_irem:
      do_irem();
      break;
    case Bytecodes::_ineg:
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateNeg(a->jint_value()), a->zero_checked()));
      break;
    // Shift counts are masked to 5 bits per the JVM spec.
    case Bytecodes::_ishl:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateShl(
          a->jint_value(),
          builder()->CreateAnd(
            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
      break;
    case Bytecodes::_ishr:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateAShr(
          a->jint_value(),
          builder()->CreateAnd(
            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
      break;
    case Bytecodes::_iushr:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateLShr(
          a->jint_value(),
          builder()->CreateAnd(
            b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
      break;
    case Bytecodes::_iand:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateAnd(a->jint_value(), b->jint_value()), false));
      break;
    case Bytecodes::_ior:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateOr(a->jint_value(), b->jint_value()),
        a->zero_checked() && b->zero_checked()));
      break;
    case Bytecodes::_ixor:
      b = pop();
      a = pop();
      push(SharkValue::create_jint(
        builder()->CreateXor(a->jint_value(), b->jint_value()), false));
      break;

    // --- Long arithmetic ---
    case Bytecodes::_ladd:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateAdd(a->jlong_value(), b->jlong_value()), false));
      break;
    case Bytecodes::_lsub:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateSub(a->jlong_value(), b->jlong_value()), false));
      break;
    case Bytecodes::_lmul:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateMul(a->jlong_value(), b->jlong_value()), false));
      break;
    case Bytecodes::_ldiv:
      do_ldiv();
      break;
    case Bytecodes::_lrem:
      do_lrem();
      break;
    case Bytecodes::_lneg:
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateNeg(a->jlong_value()), a->zero_checked()));
      break;
    // Long shift counts are masked to 6 bits; the (jint) count is widened
    // to jlong to match LLVM's same-width shift operand requirement.
    case Bytecodes::_lshl:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateShl(
          a->jlong_value(),
          builder()->CreateIntCast(
            builder()->CreateAnd(
              b->jint_value(), LLVMValue::jint_constant(0x3f)),
            SharkType::jlong_type(), true)), false));
      break;
    case Bytecodes::_lshr:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateAShr(
          a->jlong_value(),
          builder()->CreateIntCast(
            builder()->CreateAnd(
              b->jint_value(), LLVMValue::jint_constant(0x3f)),
            SharkType::jlong_type(), true)), false));
      break;
    case Bytecodes::_lushr:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateLShr(
          a->jlong_value(),
          builder()->CreateIntCast(
            builder()->CreateAnd(
              b->jint_value(), LLVMValue::jint_constant(0x3f)),
            SharkType::jlong_type(), true)), false));
      break;
    case Bytecodes::_land:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateAnd(a->jlong_value(), b->jlong_value()), false));
      break;
    case Bytecodes::_lor:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateOr(a->jlong_value(), b->jlong_value()),
        a->zero_checked() && b->zero_checked()));
      break;
    case Bytecodes::_lxor:
      b = pop();
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateXor(a->jlong_value(), b->jlong_value()), false));
      break;

    // --- Float arithmetic ---
    case Bytecodes::_fadd:
      b = pop();
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFAdd(a->jfloat_value(), b->jfloat_value())));
      break;
    case Bytecodes::_fsub:
      b = pop();
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFSub(a->jfloat_value(), b->jfloat_value())));
      break;
    case Bytecodes::_fmul:
      b = pop();
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFMul(a->jfloat_value(), b->jfloat_value())));
      break;
    case Bytecodes::_fdiv:
      b = pop();
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFDiv(a->jfloat_value(), b->jfloat_value())));
      break;
    case Bytecodes::_frem:
      b = pop();
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFRem(a->jfloat_value(), b->jfloat_value())));
      break;
    case Bytecodes::_fneg:
      a = pop();
      push(SharkValue::create_jfloat(
        builder()->CreateFNeg(a->jfloat_value())));
      break;

    // --- Double arithmetic ---
    case Bytecodes::_dadd:
      b = pop();
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFAdd(a->jdouble_value(), b->jdouble_value())));
      break;
    case Bytecodes::_dsub:
      b = pop();
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFSub(a->jdouble_value(), b->jdouble_value())));
      break;
    case Bytecodes::_dmul:
      b = pop();
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFMul(a->jdouble_value(), b->jdouble_value())));
      break;
    case Bytecodes::_ddiv:
      b = pop();
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFDiv(a->jdouble_value(), b->jdouble_value())));
      break;
    case Bytecodes::_drem:
      b = pop();
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFRem(a->jdouble_value(), b->jdouble_value())));
      break;
    case Bytecodes::_dneg:
      a = pop();
      push(SharkValue::create_jdouble(
        builder()->CreateFNeg(a->jdouble_value())));
      break;

    // iinc: add the immediate to the local in place.
    case Bytecodes::_iinc:
      i = iter()->get_index();
      set_local(
        i,
        SharkValue::create_jint(
          builder()->CreateAdd(
            LLVMValue::jint_constant(iter()->get_iinc_con()),
            local(i)->jint_value()), false));
      break;

    // --- Comparisons ---
    case Bytecodes::_lcmp:
      do_lcmp();
      break;

    case Bytecodes::_fcmpl:
      do_fcmp(false, false);
      break;
    case Bytecodes::_fcmpg:
      do_fcmp(false, true);
      break;
    case Bytecodes::_dcmpl:
      do_fcmp(true, false);
      break;
    case Bytecodes::_dcmpg:
      do_fcmp(true, true);
      break;

    // --- Primitive conversions ---
    case Bytecodes::_i2l:
      a = pop();
      push(SharkValue::create_jlong(
        builder()->CreateIntCast(
          a->jint_value(), SharkType::jlong_type(), true), a->zero_checked()));
      break;
    case Bytecodes::_i2f:
      push(SharkValue::create_jfloat(
        builder()->CreateSIToFP(
          pop()->jint_value(), SharkType::jfloat_type())));
      break;
    case Bytecodes::_i2d:
      push(SharkValue::create_jdouble(
        builder()->CreateSIToFP(
          pop()->jint_value(), SharkType::jdouble_type())));
      break;
    case Bytecodes::_l2i:
      push(SharkValue::create_jint(
        builder()->CreateIntCast(
          pop()->jlong_value(), SharkType::jint_type(), true), false));
      break;
    case Bytecodes::_l2f:
      push(SharkValue::create_jfloat(
        builder()->CreateSIToFP(
          pop()->jlong_value(), SharkType::jfloat_type())));
      break;
    case Bytecodes::_l2d:
      push(SharkValue::create_jdouble(
        builder()->CreateSIToFP(
          pop()->jlong_value(), SharkType::jdouble_type())));
      break;
    // f2i/f2l/d2i/d2l go through runtime helpers (builder()->f2i() etc.),
    // presumably to get Java-specified NaN/overflow saturation semantics
    // rather than LLVM's fptosi — TODO confirm against sharkBuilder.
    case Bytecodes::_f2i:
      push(SharkValue::create_jint(
        builder()->CreateCall(
          builder()->f2i(), pop()->jfloat_value()), false));
      break;
    case Bytecodes::_f2l:
      push(SharkValue::create_jlong(
        builder()->CreateCall(
          builder()->f2l(), pop()->jfloat_value()), false));
      break;
    case Bytecodes::_f2d:
      push(SharkValue::create_jdouble(
        builder()->CreateFPExt(
          pop()->jfloat_value(), SharkType::jdouble_type())));
      break;
    case Bytecodes::_d2i:
      push(SharkValue::create_jint(
        builder()->CreateCall(
          builder()->d2i(), pop()->jdouble_value()), false));
      break;
    case Bytecodes::_d2l:
      push(SharkValue::create_jlong(
        builder()->CreateCall(
          builder()->d2l(), pop()->jdouble_value()), false));
      break;
    case Bytecodes::_d2f:
      push(SharkValue::create_jfloat(
        builder()->CreateFPTrunc(
          pop()->jdouble_value(), SharkType::jfloat_type())));
      break;
    // i2b/i2s sign-extend via shl+ashr; i2c zero-extends via mask.
    case Bytecodes::_i2b:
      push(SharkValue::create_jint(
        builder()->CreateAShr(
          builder()->CreateShl(
            pop()->jint_value(),
            LLVMValue::jint_constant(24)),
          LLVMValue::jint_constant(24)), false));
      break;
    case Bytecodes::_i2c:
      push(SharkValue::create_jint(
        builder()->CreateAnd(
          pop()->jint_value(),
          LLVMValue::jint_constant(0xffff)), false));
      break;
    case Bytecodes::_i2s:
      push(SharkValue::create_jint(
        builder()->CreateAShr(
          builder()->CreateShl(
            pop()->jint_value(),
            LLVMValue::jint_constant(16)),
          LLVMValue::jint_constant(16)), false));
      break;

    // --- Returns and throws ---
    case Bytecodes::_return:
      do_return(T_VOID);
      break;
    case Bytecodes::_ireturn:
      do_return(T_INT);
      break;
    case Bytecodes::_lreturn:
      do_return(T_LONG);
      break;
    case Bytecodes::_freturn:
      do_return(T_FLOAT);
      break;
    case Bytecodes::_dreturn:
      do_return(T_DOUBLE);
      break;
    case Bytecodes::_areturn:
      do_return(T_OBJECT);
      break;

    case Bytecodes::_athrow:
      do_athrow();
      break;

    // --- Control flow ---
    case Bytecodes::_goto:
    case Bytecodes::_goto_w:
      do_goto();
      break;

    case Bytecodes::_jsr:
    case Bytecodes::_jsr_w:
      do_jsr();
      break;

    case Bytecodes::_ret:
      do_ret();
      break;

    case Bytecodes::_ifnull:
      do_if(ICmpInst::ICMP_EQ, SharkValue::null(), pop());
      break;
    case Bytecodes::_ifnonnull:
      do_if(ICmpInst::ICMP_NE, SharkValue::null(), pop());
      break;
    case Bytecodes::_if_acmpeq:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_EQ, b, a);
      break;
    case Bytecodes::_if_acmpne:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_NE, b, a);
      break;
    case Bytecodes::_ifeq:
      do_if(ICmpInst::ICMP_EQ, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_ifne:
      do_if(ICmpInst::ICMP_NE, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_iflt:
      do_if(ICmpInst::ICMP_SLT, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_ifle:
      do_if(ICmpInst::ICMP_SLE, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_ifgt:
      do_if(ICmpInst::ICMP_SGT, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_ifge:
      do_if(ICmpInst::ICMP_SGE, SharkValue::jint_constant(0), pop());
      break;
    case Bytecodes::_if_icmpeq:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_EQ, b, a);
      break;
    case Bytecodes::_if_icmpne:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_NE, b, a);
      break;
    case Bytecodes::_if_icmplt:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_SLT, b, a);
      break;
    case Bytecodes::_if_icmple:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_SLE, b, a);
      break;
    case Bytecodes::_if_icmpgt:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_SGT, b, a);
      break;
    case Bytecodes::_if_icmpge:
      b = pop();
      a = pop();
      do_if(ICmpInst::ICMP_SGE, b, a);
      break;

    case Bytecodes::_tableswitch:
    case Bytecodes::_lookupswitch:
      do_switch();
      break;

    case Bytecodes::_invokestatic:
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      do_call();
      break;

    case Bytecodes::_instanceof:
      // Try to fuse "instanceof; ifeq/ifne" into a single conditional
      // branch; only when the following if is a forward branch (no
      // safepoint needed).  If fused, consume the if and re-trace it.
      if (iter()->next_bci() < limit &&
          (iter()->next_bc() == Bytecodes::_ifeq ||
           iter()->next_bc() == Bytecodes::_ifne) &&
          (!UseLoopSafepoints ||
           iter()->next_get_dest() > iter()->next_bci())) {
        if (maybe_do_instanceof_if()) {
          iter()->next();
          if (SharkTraceBytecodes)
            tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));
          break;
        }
      }
      // fall through: emit a plain instance check
    case Bytecodes::_checkcast:
      do_instance_check();
      break;

    case Bytecodes::_new:
      do_new();
      break;
    case Bytecodes::_newarray:
      do_newarray();
      break;
    case Bytecodes::_anewarray:
      do_anewarray();
      break;
    case Bytecodes::_multianewarray:
      do_multianewarray();
      break;

    case Bytecodes::_monitorenter:
      do_monitorenter();
      break;
    case Bytecodes::_monitorexit:
      do_monitorexit();
      break;

    default:
      ShouldNotReachHere();
    }
  }
}
// The default current state is a private copy of the block's entry state.
SharkState* SharkBlock::initial_current_state() {
  SharkState* entry = entry_state();
  return entry->copy();
}
// Destination bci of the switch's default case (slot 0 of the dest table
// for both tableswitch and lookupswitch).
int SharkBlock::switch_default_dest() {
  return iter()->get_dest_table(0);
}
// Number of non-default entries in the current switch bytecode:
// tableswitch stores high/low bounds (slots 2/1), lookupswitch stores the
// pair count (slot 1).
int SharkBlock::switch_table_length() {
  switch(bc()) {
  case Bytecodes::_tableswitch:
    return iter()->get_int_table(2) - iter()->get_int_table(1) + 1;

  case Bytecodes::_lookupswitch:
    return iter()->get_int_table(1);

  default:
    ShouldNotReachHere();
    // Unreachable: keeps the value-returning function from flowing off
    // the end (undefined behavior) if ShouldNotReachHere() returns.
    return 0;
  }
}
// Match value of the i'th switch entry: consecutive from the low bound for
// tableswitch, explicit key of the i'th (key, dest) pair for lookupswitch.
int SharkBlock::switch_key(int i) {
  switch(bc()) {
  case Bytecodes::_tableswitch:
    return iter()->get_int_table(1) + i;

  case Bytecodes::_lookupswitch:
    return iter()->get_int_table(2 + 2 * i);

  default:
    ShouldNotReachHere();
    // Unreachable guard against flowing off the end of a value-returning
    // function (undefined behavior).
    return 0;
  }
}
// Destination bci of the i'th switch entry (layout differs between
// tableswitch and lookupswitch).
int SharkBlock::switch_dest(int i) {
  switch(bc()) {
  case Bytecodes::_tableswitch:
    return iter()->get_dest_table(i + 3);

  case Bytecodes::_lookupswitch:
    return iter()->get_dest_table(2 + 2 * i + 1);

  default:
    ShouldNotReachHere();
    // Unreachable guard against flowing off the end of a value-returning
    // function (undefined behavior).
    return 0;
  }
}
// Emit IR for idiv/irem/ldiv/lrem.  Java requires MIN_VALUE / -1 to yield
// MIN_VALUE (rem 0) instead of trapping on overflow, so the division is
// split into a special-case path and a general SDiv/SRem path joined by
// a PHI node.
void SharkBlock::do_div_or_rem(bool is_long, bool is_rem) {
  SharkValue *sb = pop();
  SharkValue *sa = pop();

  check_divide_by_zero(sb);

  // a/b are the operands; p is MIN_VALUE, q is -1 for the overflow test.
  Value *a, *b, *p, *q;
  if (is_long) {
    a = sa->jlong_value();
    b = sb->jlong_value();
    p = LLVMValue::jlong_constant(0x8000000000000000LL);
    q = LLVMValue::jlong_constant(-1);
  }
  else {
    a = sa->jint_value();
    b = sb->jint_value();
    p = LLVMValue::jint_constant(0x80000000);
    q = LLVMValue::jint_constant(-1);
  }

  BasicBlock *ip           = builder()->GetBlockInsertionPoint();
  BasicBlock *special_case = builder()->CreateBlock(ip, "special_case");
  BasicBlock *general_case = builder()->CreateBlock(ip, "general_case");
  BasicBlock *done         = builder()->CreateBlock(ip, "done");

  // Branch to the special case only when a == MIN_VALUE && b == -1.
  builder()->CreateCondBr(
    builder()->CreateAnd(
      builder()->CreateICmpEQ(a, p),
      builder()->CreateICmpEQ(b, q)),
    special_case, general_case);

  // Special case: MIN_VALUE % -1 == 0, MIN_VALUE / -1 == MIN_VALUE.
  builder()->SetInsertPoint(special_case);
  Value *special_result;
  if (is_rem) {
    if (is_long)
      special_result = LLVMValue::jlong_constant(0);
    else
      special_result = LLVMValue::jint_constant(0);
  }
  else {
    special_result = a;
  }
  builder()->CreateBr(done);

  // General case: plain signed division/remainder.
  builder()->SetInsertPoint(general_case);
  Value *general_result;
  if (is_rem)
    general_result = builder()->CreateSRem(a, b);
  else
    general_result = builder()->CreateSDiv(a, b);
  builder()->CreateBr(done);

  // Merge the two paths.
  builder()->SetInsertPoint(done);
  PHINode *result;
  if (is_long)
    result = builder()->CreatePHI(SharkType::jlong_type(), 0, "result");
  else
    result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
  result->addIncoming(special_result, special_case);
  result->addIncoming(general_result, general_case);

  if (is_long)
    push(SharkValue::create_jlong(result, false));
  else
    push(SharkValue::create_jint(result, false));
}
void SharkBlock::do_field_access(bool is_get, bool is_field) {
bool will_link;
ciField *field = iter()->get_field(will_link);
assert(will_link, "typeflow responsibility");
assert(is_field != field->is_static(), "mismatch");
SharkValue *value = NULL;
if (!is_get)
value = pop();
Value *object = NULL;
if (is_field) {
SharkValue *value = pop();
check_null(value);
object = value->generic_value();
}
if (is_get && field->is_constant() && field->is_static()) {
SharkConstant *constant = SharkConstant::for_field(iter());
if (constant->is_loaded())
value = constant->value(builder());
}
if (!is_get || value == NULL) {
if (!is_field) {
object = builder()->CreateInlineOop(field->holder()->java_mirror());
}
BasicType basic_type = field->type()->basic_type();
Type *stack_type = SharkType::to_stackType(basic_type);
Type *field_type = SharkType::to_arrayType(basic_type);
Type *type = field_type;
if (field->is_volatile()) {
if (field_type == SharkType::jfloat_type()) {
type = SharkType::jint_type();
} else if (field_type == SharkType::jdouble_type()) {
type = SharkType::jlong_type();
}
}
Value *addr = builder()->CreateAddressOfStructEntry(
object, in_ByteSize(field->offset_in_bytes()),
PointerType::getUnqual(type),
"addr");
if (is_get) {
Value* field_value;
if (field->is_volatile()) {
field_value = builder()->CreateAtomicLoad(addr);
field_value = builder()->CreateBitCast(field_value, field_type);
} else {
field_value = builder()->CreateLoad(addr);
}
if (field_type != stack_type) {
field_value = builder()->CreateIntCast(
field_value, stack_type, basic_type != T_CHAR);
}
value = SharkValue::create_generic(field->type(), field_value, false);
}
else {
Value *field_value = value->generic_value();
if (field_type != stack_type) {
field_value = builder()->CreateIntCast(
field_value, field_type, basic_type != T_CHAR);
}
if (field->is_volatile()) {
field_value = builder()->CreateBitCast(field_value, type);
builder()->CreateAtomicStore(field_value, addr);
} else {
builder()->CreateStore(field_value, addr);
}
if (!field->type()->is_primitive_type()) {
builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
}
}
}
if (is_get)
push(value);
}
// Emit IR for lcmp: pushes -1, 0 or 1 depending on a <=> b, built as a
// small diamond of basic blocks merged with a PHI node.  Note 'eq' is the
// current block (the EQ path branches straight to 'done').
void SharkBlock::do_lcmp() {
  Value *b = pop()->jlong_value();
  Value *a = pop()->jlong_value();

  BasicBlock *ip   = builder()->GetBlockInsertionPoint();
  BasicBlock *ne   = builder()->CreateBlock(ip, "lcmp_ne");
  BasicBlock *lt   = builder()->CreateBlock(ip, "lcmp_lt");
  BasicBlock *gt   = builder()->CreateBlock(ip, "lcmp_gt");
  BasicBlock *done = builder()->CreateBlock(ip, "done");

  BasicBlock *eq = builder()->GetInsertBlock();
  builder()->CreateCondBr(builder()->CreateICmpEQ(a, b), done, ne);

  builder()->SetInsertPoint(ne);
  builder()->CreateCondBr(builder()->CreateICmpSLT(a, b), lt, gt);

  builder()->SetInsertPoint(lt);
  builder()->CreateBr(done);

  builder()->SetInsertPoint(gt);
  builder()->CreateBr(done);

  builder()->SetInsertPoint(done);
  PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
  result->addIncoming(LLVMValue::jint_constant(-1), lt);
  result->addIncoming(LLVMValue::jint_constant(0),  eq);
  result->addIncoming(LLVMValue::jint_constant(1),  gt);

  push(SharkValue::create_jint(result, false));
}
// Emit IR for fcmpl/fcmpg/dcmpl/dcmpg.  The unordered (NaN) case goes to
// gt or lt depending on the 'g'/'l' variant; ordered operands are split
// into lt/eq/gt paths and merged with a PHI producing -1/0/1.
void SharkBlock::do_fcmp(bool is_double, bool unordered_is_greater) {
  Value *a, *b;
  if (is_double) {
    b = pop()->jdouble_value();
    a = pop()->jdouble_value();
  }
  else {
    b = pop()->jfloat_value();
    a = pop()->jfloat_value();
  }

  BasicBlock *ip      = builder()->GetBlockInsertionPoint();
  BasicBlock *ordered = builder()->CreateBlock(ip, "ordered");
  BasicBlock *ge      = builder()->CreateBlock(ip, "fcmp_ge");
  BasicBlock *lt      = builder()->CreateBlock(ip, "fcmp_lt");
  BasicBlock *eq      = builder()->CreateBlock(ip, "fcmp_eq");
  BasicBlock *gt      = builder()->CreateBlock(ip, "fcmp_gt");
  BasicBlock *done    = builder()->CreateBlock(ip, "done");

  // Route NaN comparisons per the bytecode variant.
  builder()->CreateCondBr(
    builder()->CreateFCmpUNO(a, b),
    unordered_is_greater ? gt : lt, ordered);

  builder()->SetInsertPoint(ordered);
  builder()->CreateCondBr(builder()->CreateFCmpULT(a, b), lt, ge);

  builder()->SetInsertPoint(ge);
  builder()->CreateCondBr(builder()->CreateFCmpUGT(a, b), gt, eq);

  builder()->SetInsertPoint(lt);
  builder()->CreateBr(done);

  builder()->SetInsertPoint(gt);
  builder()->CreateBr(done);

  builder()->SetInsertPoint(eq);
  builder()->CreateBr(done);

  builder()->SetInsertPoint(done);
  PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
  result->addIncoming(LLVMValue::jint_constant(-1), lt);
  result->addIncoming(LLVMValue::jint_constant(0),  eq);
  result->addIncoming(LLVMValue::jint_constant(1),  gt);

  push(SharkValue::create_jint(result, false));
}
// Default implementations for the virtual hooks declared in
// SharkBlock.  Apart from has_trap() (a plain SharkBlock never has a
// trap) these are all ShouldNotCallThis() stubs: they are only
// reachable through bytecodes that concrete subclasses (such as the
// top-level block used by SharkFunction) are expected to override.

void SharkBlock::emit_IR() {
  ShouldNotCallThis();
}

SharkState* SharkBlock::entry_state() {
  ShouldNotCallThis();
}

void SharkBlock::do_zero_check(SharkValue* value) {
  ShouldNotCallThis();
}

void SharkBlock::maybe_add_backedge_safepoint() {
  ShouldNotCallThis();
}

// A plain SharkBlock never deoptimizes, so it has no trap state.
bool SharkBlock::has_trap() {
  return false;
}

int SharkBlock::trap_request() {
  ShouldNotCallThis();
}

int SharkBlock::trap_bci() {
  ShouldNotCallThis();
}

void SharkBlock::do_trap(int trap_request) {
  ShouldNotCallThis();
}

void SharkBlock::do_arraylength() {
  ShouldNotCallThis();
}

void SharkBlock::do_aload(BasicType basic_type) {
  ShouldNotCallThis();
}

void SharkBlock::do_astore(BasicType basic_type) {
  ShouldNotCallThis();
}

void SharkBlock::do_return(BasicType type) {
  ShouldNotCallThis();
}

void SharkBlock::do_athrow() {
  ShouldNotCallThis();
}

void SharkBlock::do_goto() {
  ShouldNotCallThis();
}

void SharkBlock::do_jsr() {
  ShouldNotCallThis();
}

void SharkBlock::do_ret() {
  ShouldNotCallThis();
}

void SharkBlock::do_if(ICmpInst::Predicate p, SharkValue* b, SharkValue* a) {
  ShouldNotCallThis();
}

void SharkBlock::do_switch() {
  ShouldNotCallThis();
}

void SharkBlock::do_call() {
  ShouldNotCallThis();
}

void SharkBlock::do_instance_check() {
  ShouldNotCallThis();
}

bool SharkBlock::maybe_do_instanceof_if() {
  ShouldNotCallThis();
}

void SharkBlock::do_new() {
  ShouldNotCallThis();
}

void SharkBlock::do_newarray() {
  ShouldNotCallThis();
}

void SharkBlock::do_anewarray() {
  ShouldNotCallThis();
}

void SharkBlock::do_multianewarray() {
  ShouldNotCallThis();
}

void SharkBlock::do_monitorenter() {
  ShouldNotCallThis();
}

void SharkBlock::do_monitorexit() {
  ShouldNotCallThis();
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkBlock.hpp
#ifndef SHARE_VM_SHARK_SHARKBLOCK_HPP
#define SHARE_VM_SHARK_SHARKBLOCK_HPP
#include "ci/ciMethod.hpp"
#include "ci/ciStreams.hpp"
#include "memory/allocation.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkConstant.hpp"
#include "shark/sharkInvariants.hpp"
#include "shark/sharkState.hpp"
#include "shark/sharkValue.hpp"
#include "utilities/debug.hpp"
class SharkState;
// SharkBlock parses a stream of bytecodes and emits LLVM IR for them,
// tracking the abstract state of the Java expression stack and local
// variables in a SharkState as it goes.  It is subclassed for the
// different contexts in which bytecode is compiled.
class SharkBlock : public SharkTargetInvariants {
 protected:
  // Parse the whole of the target method
  SharkBlock(const SharkTargetInvariants* parent)
    : SharkTargetInvariants(parent),
      _iter(target()),
      _current_state(NULL) {}

  // Parse a given method (used when no target-invariant parent exists)
  SharkBlock(const SharkCompileInvariants* parent, ciMethod* target)
    : SharkTargetInvariants(parent, target),
      _iter(target),
      _current_state(NULL) {}

 private:
  ciBytecodeStream _iter;           // the bytecode stream being parsed
  SharkState*      _current_state;  // abstract stack/locals state, lazily created

  // The bytecode stream and the position within it
 public:
  ciBytecodeStream* iter() {
    return &_iter;
  }
  Bytecodes::Code bc() {
    return iter()->cur_bc();
  }
  int bci() {
    return iter()->cur_bci();
  }

  // The state at the start of this block (overridden by subclasses)
 protected:
  virtual SharkState* entry_state();

 private:
  SharkState* initial_current_state();

 public:
  // Returns the current abstract state, creating it on first use
  SharkState* current_state() {
    if (_current_state == NULL)
      set_current_state(initial_current_state());
    return _current_state;
  }

 protected:
  void set_current_state(SharkState* current_state) {
    _current_state = current_state;
  }

  // Local variable accessors.  Two-word values (jlong/jdouble) occupy
  // two slots; the second slot holds NULL.
 protected:
  SharkValue* local(int index) {
    SharkValue *value = current_state()->local(index);
    assert(value != NULL, "shouldn't be");
    assert(value->is_one_word() ||
           (index + 1 < max_locals() &&
            current_state()->local(index + 1) == NULL), "should be");
    return value;
  }
  void set_local(int index, SharkValue* value) {
    assert(value != NULL, "shouldn't be");
    current_state()->set_local(index, value);
    if (value->is_two_word())
      current_state()->set_local(index + 1, NULL);
  }

  // Raw expression-stack accessors: one stack slot at a time, no
  // handling of two-word values (the x prefix means "exact")
 protected:
  void xpush(SharkValue* value) {
    current_state()->push(value);
  }
  SharkValue* xpop() {
    return current_state()->pop();
  }
  SharkValue* xstack(int slot) {
    SharkValue *value = current_state()->stack(slot);
    assert(value != NULL, "shouldn't be");
    assert(value->is_one_word() ||
           (slot > 0 &&
            current_state()->stack(slot - 1) == NULL), "should be");
    return value;
  }
  int xstack_depth() {
    return current_state()->stack_depth();
  }

  // Typed stack accessors: push/pop whole Java values, inserting or
  // removing the NULL filler slot for two-word values
 protected:
  void push(SharkValue* value) {
    assert(value != NULL, "shouldn't be");
    xpush(value);
    if (value->is_two_word())
      xpush(NULL);
  }
  SharkValue* pop() {
    // A NULL top-of-stack marks the second slot of a two-word value
    int size = current_state()->stack(0) == NULL ? 2 : 1;
    if (size == 2)
      xpop();
    SharkValue *value = xpop();
    assert(value && value->size() == size, "should be");
    return value;
  }
  // Pop a result value, asserting (in debug builds) that its type
  // matches the expected one modulo the usual int-widening rules
  SharkValue* pop_result(BasicType type) {
    SharkValue *result = pop();

#ifdef ASSERT
    switch (result->basic_type()) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_SHORT:
      // sub-int types are represented as T_INT on the stack
      assert(type == T_INT, "type mismatch");
      break;

    case T_ARRAY:
      assert(type == T_OBJECT, "type mismatch");
      break;

    default:
      assert(result->basic_type() == type, "type mismatch");
    }
#endif // ASSERT

    return result;
  }

  // Code generation entry point
 public:
  virtual void emit_IR();

 protected:
  void parse_bytecode(int start, int limit);

  // Zero checks (null checks and divide-by-zero checks)
 protected:
  virtual void do_zero_check(SharkValue* value);

 protected:
  void check_null(SharkValue* object) {
    zero_check(object);
  }
  void check_divide_by_zero(SharkValue* value) {
    zero_check(value);
  }

 private:
  // Only emit the check if this value hasn't been checked already
  void zero_check(SharkValue* value) {
    if (!value->zero_checked())
      do_zero_check(value);
  }

  // Safepoints
 protected:
  virtual void maybe_add_backedge_safepoint();

  // Deoptimization (traps)
 protected:
  virtual bool has_trap();
  virtual int trap_request();
  virtual int trap_bci();
  virtual void do_trap(int trap_request);

  // Hooks for the bytecodes that subclasses must implement
 protected:
  virtual void do_arraylength();

 protected:
  virtual void do_aload(BasicType basic_type);
  virtual void do_astore(BasicType basic_type);

  // Integer division and remainder, forwarded to one common emitter
 private:
  void do_idiv() {
    do_div_or_rem(false, false);
  }
  void do_irem() {
    do_div_or_rem(false, true);
  }
  void do_ldiv() {
    do_div_or_rem(true, false);
  }
  void do_lrem() {
    do_div_or_rem(true, true);
  }
  void do_div_or_rem(bool is_long, bool is_rem);

  // Field accesses, forwarded to one common emitter
 private:
  void do_getstatic() {
    do_field_access(true, false);
  }
  void do_getfield() {
    do_field_access(true, true);
  }
  void do_putstatic() {
    do_field_access(false, false);
  }
  void do_putfield() {
    do_field_access(false, true);
  }
  void do_field_access(bool is_get, bool is_field);

  // Long and floating-point comparisons
 private:
  void do_lcmp();
  void do_fcmp(bool is_double, bool unordered_is_greater);

 protected:
  virtual void do_return(BasicType type);
  virtual void do_athrow();

 protected:
  virtual void do_goto();

 protected:
  virtual void do_jsr();
  virtual void do_ret();

 protected:
  virtual void do_if(llvm::ICmpInst::Predicate p, SharkValue* b, SharkValue* a);

  // tableswitch/lookupswitch decoding helpers
 protected:
  int switch_default_dest();
  int switch_table_length();
  int switch_key(int i);
  int switch_dest(int i);

  virtual void do_switch();

 protected:
  virtual void do_call();

 protected:
  virtual void do_instance_check();
  virtual bool maybe_do_instanceof_if();

 protected:
  virtual void do_new();
  virtual void do_newarray();
  virtual void do_anewarray();
  virtual void do_multianewarray();

 protected:
  virtual void do_monitorenter();
  virtual void do_monitorexit();
};
#endif // SHARE_VM_SHARK_SHARKBLOCK_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkBuilder.cpp
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/os.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkContext.hpp"
#include "shark/sharkRuntime.hpp"
#include "utilities/debug.hpp"
using namespace llvm;
// A SharkBuilder is an LLVM IRBuilder bound to the shared Shark
// context, plus the code buffer into which oops, metadata and other
// constants are inlined.
SharkBuilder::SharkBuilder(SharkCodeBuffer* code_buffer)
  : IRBuilder<>(SharkContext::current()),
    _code_buffer(code_buffer) {
}
// Helpers for accessing structures and arrays from generated code.

// Return a typed pointer to the field at byte offset 'offset' within
// the structure pointed to by 'base'.
Value* SharkBuilder::CreateAddressOfStructEntry(Value* base,
                                                ByteSize offset,
                                                Type* type,
                                                const char* name) {
  return CreateBitCast(CreateStructGEP(base, in_bytes(offset)), type, name);
}

// Load the value of the field at byte offset 'offset' within the
// structure pointed to by 'base'.
LoadInst* SharkBuilder::CreateValueOfStructEntry(Value* base,
                                                 ByteSize offset,
                                                 Type* type,
                                                 const char* name) {
  return CreateLoad(
    CreateAddressOfStructEntry(
      base, offset, PointerType::getUnqual(type)),
    name);
}

// Load the length field of an arrayOop.
LoadInst* SharkBuilder::CreateArrayLength(Value* arrayoop) {
  return CreateValueOfStructEntry(
    arrayoop, in_ByteSize(arrayOopDesc::length_offset_in_bytes()),
    SharkType::jint_type(), "length");
}

// Compute the address of element 'index' of an array:
//   arrayoop + base_offset + index * element_bytes.
// element_bytes must be a power of two (the scaling is done with a
// shift).
Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
                                        Type* element_type,
                                        int element_bytes,
                                        ByteSize base_offset,
                                        Value* index,
                                        const char* name) {
  Value* offset = CreateIntCast(index, SharkType::intptr_type(), false);
  if (element_bytes != 1)
    offset = CreateShl(
      offset,
      LLVMValue::intptr_constant(exact_log2(element_bytes)));
  offset = CreateAdd(
    LLVMValue::intptr_constant(in_bytes(base_offset)), offset);

  return CreateIntToPtr(
    CreateAdd(CreatePtrToInt(arrayoop, SharkType::intptr_type()), offset),
    PointerType::getUnqual(element_type),
    name);
}

// As above, with the element type and size derived from a BasicType.
Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
                                        BasicType basic_type,
                                        ByteSize base_offset,
                                        Value* index,
                                        const char* name) {
  return CreateArrayAddress(
    arrayoop,
    SharkType::to_arrayType(basic_type),
    type2aelembytes(basic_type),
    base_offset, index, name);
}

// As above, with the standard base offset for the given element type.
Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
                                        BasicType basic_type,
                                        Value* index,
                                        const char* name) {
  return CreateArrayAddress(
    arrayoop, basic_type,
    in_ByteSize(arrayOopDesc::base_offset_in_bytes(basic_type)),
    index, name);
}
// Map a single signature character to an LLVM type.  Lowercase letters
// denote values, the corresponding uppercase letters denote pointers to
// those values; the remaining letters denote various VM types.  'v'
// (void) is only legal where void_ok is true (i.e. as a return type).
Type* SharkBuilder::make_type(char type, bool void_ok) {
  switch (type) {
    // Java primitive types
  case 'c':
    return SharkType::jbyte_type();
  case 'i':
    return SharkType::jint_type();
  case 'l':
    return SharkType::jlong_type();
  case 'x':
    return SharkType::intptr_type();
  case 'f':
    return SharkType::jfloat_type();
  case 'd':
    return SharkType::jdouble_type();

    // Pointers to Java primitive types
  case 'C':
  case 'I':
  case 'L':
  case 'X':
  case 'F':
  case 'D':
    return PointerType::getUnqual(make_type(tolower(type), false));

    // VM types
  case 'T':
    return SharkType::thread_type();
  case 'M':
    return PointerType::getUnqual(SharkType::monitor_type());
  case 'O':
    return SharkType::oop_type();
  case 'K':
    return SharkType::klass_type();

    // Miscellaneous
  case 'v':
    assert(void_ok, "should be");
    return SharkType::void_type();
  case '1':
    return SharkType::bit_type();

  default:
    ShouldNotReachHere();
    // Unreachable; keeps the compiler from warning about (and the
    // optimizer from exploiting) control flow falling off the end of
    // a non-void function.
    return NULL;
  }
}
// Build an LLVM function type from compact signature strings: one
// character per parameter in 'params' and a single character in 'ret',
// using the encoding defined by make_type().
FunctionType* SharkBuilder::make_ftype(const char* params,
                                       const char* ret) {
  std::vector<Type*> param_types;
  for (const char* c = params; *c; c++)
    param_types.push_back(make_type(*c, false));

  assert(strlen(ret) == 1, "should be");
  Type *return_type = make_type(*ret, true);

  return FunctionType::get(return_type, param_types, false);
}

// Declare an external function by name (used for LLVM intrinsics and
// functions the linker can resolve).
Value* SharkBuilder::make_function(const char* name,
                                   const char* params,
                                   const char* ret) {
  return SharkContext::current().get_external(name, make_ftype(params, ret));
}

// Wrap a raw VM entry-point address as a typed function pointer
// constant (used for functions with no linkable symbol name).
Value* SharkBuilder::make_function(address func,
                                   const char* params,
                                   const char* ret) {
  return CreateIntToPtr(
    LLVMValue::intptr_constant((intptr_t) func),
    PointerType::getUnqual(make_ftype(params, ret)));
}
// Accessors for the external functions callable from generated code.
// Each returns a typed function-pointer Value; the signature strings
// use the encoding defined by make_type().

// VM calls
Value* SharkBuilder::find_exception_handler() {
  return make_function(
    (address) SharkRuntime::find_exception_handler, "TIi", "i");
}

Value* SharkBuilder::monitorenter() {
  return make_function((address) SharkRuntime::monitorenter, "TM", "v");
}

Value* SharkBuilder::monitorexit() {
  return make_function((address) SharkRuntime::monitorexit, "TM", "v");
}

Value* SharkBuilder::new_instance() {
  return make_function((address) SharkRuntime::new_instance, "Ti", "v");
}

Value* SharkBuilder::newarray() {
  return make_function((address) SharkRuntime::newarray, "Tii", "v");
}

Value* SharkBuilder::anewarray() {
  return make_function((address) SharkRuntime::anewarray, "Tii", "v");
}

Value* SharkBuilder::multianewarray() {
  return make_function((address) SharkRuntime::multianewarray, "TiiI", "v");
}

Value* SharkBuilder::register_finalizer() {
  return make_function((address) SharkRuntime::register_finalizer, "TO", "v");
}

Value* SharkBuilder::safepoint() {
  return make_function((address) SafepointSynchronize::block, "T", "v");
}

Value* SharkBuilder::throw_ArithmeticException() {
  return make_function(
    (address) SharkRuntime::throw_ArithmeticException, "TCi", "v");
}

Value* SharkBuilder::throw_ArrayIndexOutOfBoundsException() {
  return make_function(
    (address) SharkRuntime::throw_ArrayIndexOutOfBoundsException, "TCii", "v");
}

Value* SharkBuilder::throw_ClassCastException() {
  return make_function(
    (address) SharkRuntime::throw_ClassCastException, "TCi", "v");
}

Value* SharkBuilder::throw_NullPointerException() {
  return make_function(
    (address) SharkRuntime::throw_NullPointerException, "TCi", "v");
}

// High-level non-VM calls (float/double conversions, type checks,
// time)
Value* SharkBuilder::f2i() {
  return make_function((address) SharedRuntime::f2i, "f", "i");
}

Value* SharkBuilder::f2l() {
  return make_function((address) SharedRuntime::f2l, "f", "l");
}

Value* SharkBuilder::d2i() {
  return make_function((address) SharedRuntime::d2i, "d", "i");
}

Value* SharkBuilder::d2l() {
  return make_function((address) SharedRuntime::d2l, "d", "l");
}

Value* SharkBuilder::is_subtype_of() {
  return make_function((address) SharkRuntime::is_subtype_of, "KK", "c");
}

Value* SharkBuilder::current_time_millis() {
  return make_function((address) os::javaTimeMillis, "", "l");
}

// Math functions: LLVM intrinsics where they exist, libm otherwise
Value* SharkBuilder::sin() {
  return make_function("llvm.sin.f64", "d", "d");
}

Value* SharkBuilder::cos() {
  return make_function("llvm.cos.f64", "d", "d");
}

Value* SharkBuilder::tan() {
  return make_function((address) ::tan, "d", "d");
}

Value* SharkBuilder::atan2() {
  return make_function((address) ::atan2, "dd", "d");
}

Value* SharkBuilder::sqrt() {
  return make_function("llvm.sqrt.f64", "d", "d");
}

Value* SharkBuilder::log() {
  return make_function("llvm.log.f64", "d", "d");
}

Value* SharkBuilder::log10() {
  return make_function("llvm.log10.f64", "d", "d");
}

Value* SharkBuilder::pow() {
  return make_function("llvm.pow.f64", "dd", "d");
}

Value* SharkBuilder::exp() {
  return make_function("llvm.exp.f64", "d", "d");
}

Value* SharkBuilder::fabs() {
  return make_function((address) ::fabs, "d", "d");
}

Value* SharkBuilder::unsafe_field_offset_to_byte_offset() {
  extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
  return make_function((address) Unsafe_field_offset_to_byte_offset, "l", "l");
}

Value* SharkBuilder::osr_migration_end() {
  return make_function((address) SharedRuntime::OSR_migration_end, "C", "v");
}

// Deoptimization and OSR support
Value* SharkBuilder::throw_StackOverflowError() {
  return make_function((address) ZeroStack::handle_overflow, "T", "v");
}

Value* SharkBuilder::uncommon_trap() {
  return make_function((address) SharkRuntime::uncommon_trap, "Ti", "i");
}

Value* SharkBuilder::deoptimized_entry_point() {
  return make_function((address) CppInterpreter::main_loop, "iT", "v");
}

// Native method support
Value* SharkBuilder::check_special_condition_for_native_trans() {
  return make_function(
    (address) JavaThread::check_special_condition_for_native_trans,
    "T", "v");
}

// Low-level non-VM calls
Value* SharkBuilder::frame_address() {
  return make_function("llvm.frameaddress", "i", "C");
}

Value* SharkBuilder::memset() {
  return make_function("llvm.memset.p0i8.i32", "Cciii", "v");
}

Value* SharkBuilder::unimplemented() {
  return make_function((address) report_unimplemented, "Ci", "v");
}

Value* SharkBuilder::should_not_reach_here() {
  return make_function((address) report_should_not_reach_here, "Ci", "v");
}

Value* SharkBuilder::dump() {
  return make_function((address) SharkRuntime::dump, "Cx", "v");
}
// Emit a call to llvm.frameaddress(0), i.e. the current frame.
CallInst* SharkBuilder::CreateGetFrameAddress() {
  return CreateCall(frame_address(), LLVMValue::jint_constant(0));
}

// Emit a call to the memset intrinsic; the trailing constant is the
// intrinsic's isvolatile flag (false).
CallInst* SharkBuilder::CreateMemset(Value* dst,
                                     Value* value,
                                     Value* len,
                                     Value* align) {
  return CreateCall5(memset(), dst, value, len, align,
                     LLVMValue::jint_constant(0));
}

// Emit a call to report_unimplemented(file, line).
CallInst* SharkBuilder::CreateUnimplemented(const char* file, int line) {
  return CreateCall2(
    unimplemented(),
    CreateIntToPtr(
      LLVMValue::intptr_constant((intptr_t) file),
      PointerType::getUnqual(SharkType::jbyte_type())),
    LLVMValue::jint_constant(line));
}

// Emit a call to report_should_not_reach_here(file, line).
CallInst* SharkBuilder::CreateShouldNotReachHere(const char* file, int line) {
  return CreateCall2(
    should_not_reach_here(),
    CreateIntToPtr(
      LLVMValue::intptr_constant((intptr_t) file),
      PointerType::getUnqual(SharkType::jbyte_type())),
    LLVMValue::jint_constant(line));
}
#ifndef PRODUCT
// Debug-only: emit a call to SharkRuntime::dump(name, value) so the
// named value can be printed at runtime.  Pointer values are printed
// as integers; only integer and pointer types are supported.
CallInst* SharkBuilder::CreateDump(Value* value) {
  const char *name;
  if (value->hasName())
    // The strdup'd name is deliberately never freed: its address is
    // baked into the generated code below and must outlive it.
    name = strdup(value->getName().str().c_str());
  else
    name = "unnamed_value";

  if (isa<PointerType>(value->getType()))
    value = CreatePtrToInt(value, SharkType::intptr_type());
  else if (value->getType()->
           isIntegerTy()
           )
    value = CreateIntCast(value, SharkType::intptr_type(), false);
  else
    Unimplemented();

  return CreateCall2(
    dump(),
    CreateIntToPtr(
      LLVMValue::intptr_constant((intptr_t) name),
      PointerType::getUnqual(SharkType::jbyte_type())),
    value);
}
#endif // PRODUCT
// Emit the GC write barrier for a store to 'field': mark the card
// covering the field's address as dirty.  Only card-table barriers
// are supported.
void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
  if (bs->kind() != BarrierSet::CardTableModRef)
    Unimplemented();

  // card_table[field >> card_shift] = dirty_card
  CreateStore(
    LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card),
    CreateIntToPtr(
      CreateAdd(
        LLVMValue::intptr_constant(
          (intptr_t) ((CardTableModRefBS *) bs)->byte_map_base),
        CreateLShr(
          CreatePtrToInt(field, SharkType::intptr_type()),
          LLVMValue::intptr_constant(CardTableModRefBS::card_shift))),
      PointerType::getUnqual(SharkType::jbyte_type())));
}
// Compute the runtime address of the code buffer entry at 'offset'
// (base_pc + offset, as an intptr).
Value* SharkBuilder::code_buffer_address(int offset) {
  return CreateAdd(
    code_buffer()->base_pc(),
    LLVMValue::intptr_constant(offset));
}

// Inline an oop into the code buffer and emit a load of it.  The oop
// is loaded indirectly from the buffer (rather than embedded as an
// immediate) so the GC can relocate it.
Value* SharkBuilder::CreateInlineOop(jobject object, const char* name) {
  return CreateLoad(
    CreateIntToPtr(
      code_buffer_address(code_buffer()->inline_oop(object)),
      PointerType::getUnqual(SharkType::oop_type())),
    name);
}

// Inline a Metadata pointer into the code buffer and emit a load of it.
Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) {
  assert(metadata != NULL, "inlined metadata must not be NULL");
  assert(metadata->is_metaspace_object(), "sanity check");
  return CreateLoad(
    CreateIntToPtr(
      code_buffer_address(code_buffer()->inline_Metadata(metadata)),
      PointerType::getUnqual(type)),
    name);
}

// Copy arbitrary data into the code buffer and return a typed pointer
// to the copy.
Value* SharkBuilder::CreateInlineData(void* data,
                                      size_t size,
                                      Type* type,
                                      const char* name) {
  return CreateIntToPtr(
    code_buffer_address(code_buffer()->inline_data(data, size)),
    type,
    name);
}
// Find the basic block that newly created blocks should be inserted
// before, i.e. the block following the current insert block.  Returns
// NULL if the current block is the last one, in which case new blocks
// are simply appended to the function.
BasicBlock* SharkBuilder::GetBlockInsertionPoint() const {
  BasicBlock *cur = GetInsertBlock();

  // Walk the function's block list looking for the block after 'cur'.
  // Note that the iterator is advanced before the comparison, so the
  // search effectively starts at the second block.
  // NOTE(review): if 'cur' were ever absent from the list, &*iter
  // would be evaluated at end() on the last iteration -- presumably
  // 'cur' always belongs to this function and this cannot happen;
  // confirm against callers.
  Function::iterator iter = cur->getParent()->begin();
  Function::iterator end = cur->getParent()->end();
  while (iter != end) {
    iter++;
    if (&*iter == cur) {
      iter++;
      break;
    }
  }

  if (iter == end)
    return NULL;
  else
    return iter;
}
// Create a new basic block in the current function, inserted before
// 'ip' (or appended if 'ip' is NULL).
BasicBlock* SharkBuilder::CreateBlock(BasicBlock* ip, const char* name) const {
  return BasicBlock::Create(
    SharkContext::current(), name, GetInsertBlock()->getParent(), ip);
}

// Emit an atomic (ordered) load with the given alignment, ordering and
// synchronization scope.
LoadInst* SharkBuilder::CreateAtomicLoad(Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
  return Insert(new LoadInst(ptr, name, isVolatile, align, ordering, synchScope), name);
}

// Emit an atomic (ordered) store with the given alignment, ordering and
// synchronization scope.
StoreInst* SharkBuilder::CreateAtomicStore(Value* val, Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
  return Insert(new StoreInst(val, ptr, isVolatile, align, ordering, synchScope), name);
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkBuilder.hpp
#ifndef SHARE_VM_SHARK_SHARKBUILDER_HPP
#define SHARE_VM_SHARK_SHARKBUILDER_HPP
#include "ci/ciType.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkCodeBuffer.hpp"
#include "shark/sharkEntry.hpp"
#include "shark/sharkType.hpp"
#include "shark/sharkValue.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"
// SharkBuilder wraps an LLVM IRBuilder with helpers for the IR
// patterns Shark emits: VM structure access, array addressing,
// external VM/runtime calls, write barriers, and inlining constants
// into the code buffer.
class SharkBuilder : public llvm::IRBuilder<> {
  friend class SharkCompileInvariants;

 public:
  SharkBuilder(SharkCodeBuffer* code_buffer);

  // The code buffer constants are inlined into
 private:
  SharkCodeBuffer* _code_buffer;

 protected:
  SharkCodeBuffer* code_buffer() const {
    return _code_buffer;
  }

  // Atomic (ordered) memory accesses
 public:
  llvm::LoadInst* CreateAtomicLoad(llvm::Value* ptr,
                                   unsigned align = HeapWordSize,
                                   llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
                                   llvm::SynchronizationScope synchScope = llvm::CrossThread,
                                   bool isVolatile = true,
                                   const char *name = "");
  llvm::StoreInst* CreateAtomicStore(llvm::Value *val,
                                     llvm::Value *ptr,
                                     unsigned align = HeapWordSize,
                                     llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
                                     llvm::SynchronizationScope SynchScope = llvm::CrossThread,
                                     bool isVolatile = true,
                                     const char *name = "");

  // Helpers for accessing structures
 public:
  llvm::Value* CreateAddressOfStructEntry(llvm::Value* base,
                                          ByteSize offset,
                                          llvm::Type* type,
                                          const char *name = "");
  llvm::LoadInst* CreateValueOfStructEntry(llvm::Value* base,
                                           ByteSize offset,
                                           llvm::Type* type,
                                           const char *name = "");

  // Helpers for accessing arrays
 public:
  llvm::LoadInst* CreateArrayLength(llvm::Value* arrayoop);
  llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
                                  llvm::Type* element_type,
                                  int element_bytes,
                                  ByteSize base_offset,
                                  llvm::Value* index,
                                  const char* name = "");
  llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
                                  BasicType basic_type,
                                  ByteSize base_offset,
                                  llvm::Value* index,
                                  const char* name = "");
  llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
                                  BasicType basic_type,
                                  llvm::Value* index,
                                  const char* name = "");

  // Helpers for creating external function declarations from compact
  // signature strings (see make_type for the encoding)
 private:
  static llvm::Type* make_type(char type, bool void_ok);
  static llvm::FunctionType* make_ftype(const char* params,
                                        const char* ret);
  llvm::Value* make_function(const char* name,
                             const char* params,
                             const char* ret);
  llvm::Value* make_function(address func,
                             const char* params,
                             const char* ret);

  // VM calls
 public:
  llvm::Value* find_exception_handler();
  llvm::Value* monitorenter();
  llvm::Value* monitorexit();
  llvm::Value* new_instance();
  llvm::Value* newarray();
  llvm::Value* anewarray();
  llvm::Value* multianewarray();
  llvm::Value* register_finalizer();
  llvm::Value* safepoint();
  llvm::Value* throw_ArithmeticException();
  llvm::Value* throw_ArrayIndexOutOfBoundsException();
  llvm::Value* throw_ClassCastException();
  llvm::Value* throw_NullPointerException();

  // High-level non-VM calls
 public:
  llvm::Value* f2i();
  llvm::Value* f2l();
  llvm::Value* d2i();
  llvm::Value* d2l();
  llvm::Value* is_subtype_of();
  llvm::Value* current_time_millis();
  llvm::Value* sin();
  llvm::Value* cos();
  llvm::Value* tan();
  llvm::Value* atan2();
  llvm::Value* sqrt();
  llvm::Value* log();
  llvm::Value* log10();
  llvm::Value* pow();
  llvm::Value* exp();
  llvm::Value* fabs();
  llvm::Value* unsafe_field_offset_to_byte_offset();
  llvm::Value* osr_migration_end();

  // Deoptimization support
 public:
  llvm::Value* throw_StackOverflowError();
  llvm::Value* uncommon_trap();
  llvm::Value* deoptimized_entry_point();

  // Native method support
 public:
  llvm::Value* check_special_condition_for_native_trans();

  // Low-level non-VM calls
 private:
  llvm::Value* cmpxchg_int();
  llvm::Value* cmpxchg_ptr();
  llvm::Value* frame_address();
  llvm::Value* memset();
  llvm::Value* unimplemented();
  llvm::Value* should_not_reach_here();
  llvm::Value* dump();

 public:
  llvm::CallInst* CreateGetFrameAddress();
  llvm::CallInst* CreateMemset(llvm::Value* dst,
                               llvm::Value* value,
                               llvm::Value* len,
                               llvm::Value* align);
  llvm::CallInst* CreateUnimplemented(const char* file, int line);
  llvm::CallInst* CreateShouldNotReachHere(const char* file, int line);
  NOT_PRODUCT(llvm::CallInst* CreateDump(llvm::Value* value));

  // GC write barrier
 public:
  void CreateUpdateBarrierSet(BarrierSet* bs, llvm::Value* field);

  // Helpers for inlining constants into the generated code
 public:
  llvm::Value* code_buffer_address(int offset);
  llvm::Value* CreateInlineOop(jobject object, const char* name = "");
  llvm::Value* CreateInlineOop(ciObject* object, const char* name = "") {
    return CreateInlineOop(object->constant_encoding(), name);
  }
  llvm::Value* CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name = "");
  llvm::Value* CreateInlineMetadata(ciMetadata* metadata, llvm::PointerType* type, const char* name = "") {
    return CreateInlineMetadata(metadata->constant_encoding(), type, name);
  }
  llvm::Value* CreateInlineData(void* data,
                                size_t size,
                                llvm::Type* type,
                                const char* name = "");

  // Basic block creation
 public:
  llvm::BasicBlock* GetBlockInsertionPoint() const;
  llvm::BasicBlock* CreateBlock(llvm::BasicBlock* ip,
                                const char* name="") const;
};
#endif // SHARE_VM_SHARK_SHARKBUILDER_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkCacheDecache.cpp
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "code/debugInfoRec.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkCacheDecache.hpp"
#include "shark/sharkFunction.hpp"
#include "shark/sharkState.hpp"
using namespace llvm;
// Begin decaching at a safepoint: allocate a unique PC offset for this
// safepoint and start a new oopmap for it in the debug info.
void SharkDecacher::start_frame() {
  // Start recording the debug information
  _pc_offset = code_buffer()->create_unique_offset();
  _oopmap = new OopMap(
    oopmap_slot_munge(stack()->oopmap_frame_size()),
    oopmap_slot_munge(arg_size()));
  debug_info()->add_safepoint(pc_offset(), oopmap());
}
// Begin decaching the expression stack: allocate the debug-info array
// for the stack contents and write the Java stack pointer into the
// frame so the VM can walk it.
void SharkDecacher::start_stack(int stack_depth) {
  // Create the array we'll record the stack contents in
  _exparray = new GrowableArray<ScopeValue*>(stack_depth);

  // Set the stack pointer
  stack()->CreateStoreStackPointer(
    builder()->CreatePtrToInt(
      stack()->slot_addr(
        stack()->stack_slots_offset() + max_stack() - stack_depth),
      SharkType::intptr_type()));
}
// Decache one expression stack slot: spill the cached value to the
// frame, record oops in the oopmap, and record the slot's location in
// the debug info, as each subclass's policy requires.
void SharkDecacher::process_stack_slot(int index,
                                       SharkValue** addr,
                                       int offset) {
  SharkValue *value = *addr;

  // Write the value to the frame if necessary
  if (stack_slot_needs_write(index, value)) {
    write_value_to_frame(
      SharkType::to_stackType(value->basic_type()),
      value->generic_value(),
      adjusted_offset(value, offset));
  }

  // Record the value in the oopmap if necessary
  if (stack_slot_needs_oopmap(index, value)) {
    oopmap()->set_oop(slot2reg(offset));
  }

  // Record the value in the debuginfo if necessary
  if (stack_slot_needs_debuginfo(index, value)) {
    exparray()->append(slot2lv(offset, stack_location_type(index, addr)));
  }
}
// Begin decaching monitors: allocate the debug-info array for them.
void SharkDecacher::start_monitors(int num_monitors) {
  _monarray = new GrowableArray<MonitorValue*>(num_monitors);
}

// Record one monitor in the oopmap and debug info; its object slot
// holds an oop, its box slot the BasicLock.
void SharkDecacher::process_monitor(int index, int box_offset, int obj_offset) {
  oopmap()->set_oop(slot2reg(obj_offset));

  monarray()->append(new MonitorValue(
    slot2lv (obj_offset, Location::oop),
    slot2loc(box_offset, Location::normal)));
}
// Decache the temporary oop slot: if a temporary oop is live, spill it
// to the frame and record it in the oopmap so the GC can find it.
//
// NB the original text here passed only two arguments to the
// three-argument write_value_to_frame(type, value, offset) (see its
// declaration in sharkCacheDecache.hpp) -- the '*value' argument had
// been lost; it is restored below.
void SharkDecacher::process_oop_tmp_slot(Value** value, int offset) {
  // Write the value to the frame if necessary
  if (*value) {
    write_value_to_frame(
      SharkType::oop_type(),
      *value,
      offset);

    // Record the value in the oopmap
    oopmap()->set_oop(slot2reg(offset));
  }
}
// Decache the method slot: spill the cached Method* to the frame.
//
// NB the original text here passed only two arguments to the
// three-argument write_value_to_frame(type, value, offset) (see its
// declaration in sharkCacheDecache.hpp) -- the '*value' argument had
// been lost; it is restored below.
void SharkDecacher::process_method_slot(Value** value, int offset) {
  // Write the method to the frame
  write_value_to_frame(
    SharkType::Method_type(),
    *value,
    offset);
}
// Decache the PC slot: store the address in the code buffer that
// identifies this safepoint into the frame.
void SharkDecacher::process_pc_slot(int offset) {
  builder()->CreateStore(
    builder()->code_buffer_address(pc_offset()),
    stack()->slot_addr(offset));
}

// Begin decaching locals: allocate the debug-info array for them.
void SharkDecacher::start_locals() {
  _locarray = new GrowableArray<ScopeValue*>(max_locals());
}
// Decache one local variable slot: spill the cached value to the
// frame, record oops in the oopmap, and record the slot's location in
// the debug info, as each subclass's policy requires.
void SharkDecacher::process_local_slot(int index,
                                       SharkValue** addr,
                                       int offset) {
  SharkValue *value = *addr;

  // Write the value to the frame if necessary
  if (local_slot_needs_write(index, value)) {
    write_value_to_frame(
      SharkType::to_stackType(value->basic_type()),
      value->generic_value(),
      adjusted_offset(value, offset));
  }

  // Record the value in the oopmap if necessary
  if (local_slot_needs_oopmap(index, value)) {
    oopmap()->set_oop(slot2reg(offset));
  }

  // Record the value in the debuginfo if necessary
  if (local_slot_needs_debuginfo(index, value)) {
    locarray()->append(slot2lv(offset, local_location_type(index, addr)));
  }
}
// Finish decaching: describe the scope (locals, expression stack and
// monitors gathered above) for this safepoint and close it.
void SharkDecacher::end_frame() {
  // Record the scope
  debug_info()->describe_scope(
    pc_offset(),
    target(),
    bci(),
    true,
    false,
    false,
    debug_info()->create_scope_values(locarray()),
    debug_info()->create_scope_values(exparray()),
    debug_info()->create_monitor_values(monarray()));

  // Finish recording the debug information
  debug_info()->end_safepoint(pc_offset());
}
void SharkCacher::process_stack_slot(int index,
SharkValue** addr,
int offset) {
SharkValue *value = *addr;
if (stack_slot_needs_read(index, value)) {
value->type(),
read_value_from_frame(
SharkType::to_stackType(value->basic_type()),
adjusted_offset(value, offset)),
value->zero_checked());
}
}
// Copy one monitor (BasicLock box and object oop) from the OSR buffer
// into the compiled frame's monitor area.
void SharkOSREntryCacher::process_monitor(int index,
                                          int box_offset,
                                          int obj_offset) {
  // Copy the monitor from the OSR buffer to the frame
  int src_offset = max_locals() + index * 2;
  builder()->CreateStore(
    builder()->CreateLoad(
      CreateAddressOfOSRBufEntry(src_offset, SharkType::intptr_type())),
    stack()->slot_addr(box_offset, SharkType::intptr_type()));
  builder()->CreateStore(
    builder()->CreateLoad(
      CreateAddressOfOSRBufEntry(src_offset + 1, SharkType::oop_type())),
    stack()->slot_addr(obj_offset, SharkType::oop_type()));
}
// Recache the temporary oop slot: if a temporary oop was live across
// the safepoint, reload it from the frame (the GC may have moved it).
//
// NB the original text here was garbled: 'if (*value)' had no
// controlled statement.  The reload assignment is restored below,
// mirroring SharkDecacher::process_oop_tmp_slot's spill.
void SharkCacher::process_oop_tmp_slot(Value** value, int offset) {
  // Read the value from the frame if necessary
  if (*value)
    *value = read_value_from_frame(
      SharkType::oop_type(),
      offset);
}
// Recache the method slot: reload the Method* from the frame.
//
// NB the original text here had an empty body: the '*value = ...'
// statement had been lost.  It is restored below, mirroring
// SharkDecacher::process_method_slot's spill.
void SharkCacher::process_method_slot(Value** value, int offset) {
  // Cache the method pointer
  *value = read_value_from_frame(
    SharkType::Method_type(),
    offset);
}
// "Recache" the method slot on function entry: the frame's method slot
// was just written by the entry code, so the known method value is
// used directly instead of reloading it from the frame.
//
// NB the original text here had an empty body: the '*value = ...'
// statement had been lost.  Restored below; 'method()' is presumably
// SharkFunctionEntryCacher's accessor for the method being entered --
// confirm against the class declaration in sharkCacheDecache.hpp.
void SharkFunctionEntryCacher::process_method_slot(Value** value, int offset) {
  // "Cache" the method pointer
  *value = method();
}
void SharkCacher::process_local_slot(int index,
SharkValue** addr,
int offset) {
SharkValue *value = *addr;
if (local_slot_needs_read(index, value)) {
value->type(),
read_value_from_frame(
SharkType::to_stackType(value->basic_type()),
adjusted_offset(value, offset)),
value->zero_checked());
}
}
// Compute the address of entry 'offset' in the OSR buffer, cast to a
// pointer to 'type' if that differs from intptr.
Value* SharkOSREntryCacher::CreateAddressOfOSRBufEntry(int offset,
                                                       Type* type) {
  Value *result = builder()->CreateStructGEP(osr_buf(), offset);
  if (type != SharkType::intptr_type())
    result = builder()->CreateBitCast(result, PointerType::getUnqual(type));
  return result;
}
void SharkOSREntryCacher::process_local_slot(int index,
SharkValue** addr,
int offset) {
SharkValue *value = *addr;
if (local_slot_needs_read(index, value)) {
value->type(),
builder()->CreateLoad(
CreateAddressOfOSRBufEntry(
adjusted_offset(value, max_locals() - 1 - index),
SharkType::to_stackType(value->basic_type()))),
value->zero_checked());
}
}
// Store 'value' into the frame slot at 'offset', typed as 'type'.
void SharkDecacher::write_value_to_frame(Type* type,
                                         Value* value,
                                         int offset) {
  builder()->CreateStore(value, stack()->slot_addr(offset, type));
}

// Load a value of 'type' from the frame slot at 'offset'.
Value* SharkCacher::read_value_from_frame(Type* type, int offset) {
  return builder()->CreateLoad(stack()->slot_addr(offset, type));
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkCacheDecache.hpp
#ifndef SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
#define SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
#include "ci/ciMethod.hpp"
#include "code/debugInfoRec.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkFunction.hpp"
#include "shark/sharkStateScanner.hpp"
// Common base for the state scanners that move Java values between
// Shark's cached (SSA value) representation and the frame (used around
// calls, safepoints and traps).
class SharkCacherDecacher : public SharkStateScanner {
 protected:
  SharkCacherDecacher(SharkFunction* function)
    : SharkStateScanner(function) {}

  // Helper
 protected:
  // Two-word values are addressed via their second (lower-offset) slot
  static int adjusted_offset(SharkValue* value, int offset) {
    if (value->is_two_word())
      offset--;
    return offset;
  }
};
class SharkDecacher : public SharkCacherDecacher {
protected:
SharkDecacher(SharkFunction* function, int bci)
: SharkCacherDecacher(function), _bci(bci) {}
private:
int _bci;
protected:
int bci() const {
return _bci;
}
private:
int _pc_offset;
OopMap* _oopmap;
GrowableArray<ScopeValue*>* _exparray;
GrowableArray<MonitorValue*>* _monarray;
GrowableArray<ScopeValue*>* _locarray;
private:
int pc_offset() const {
return _pc_offset;
}
OopMap* oopmap() const {
return _oopmap;
}
GrowableArray<ScopeValue*>* exparray() const {
return _exparray;
}
GrowableArray<MonitorValue*>* monarray() const {
return _monarray;
}
GrowableArray<ScopeValue*>* locarray() const {
return _locarray;
}
protected:
void start_frame();
void start_stack(int stack_depth);
void process_stack_slot(int index, SharkValue** value, int offset);
void start_monitors(int num_monitors);
void process_monitor(int index, int box_offset, int obj_offset);
void process_oop_tmp_slot(llvm::Value** value, int offset);
void process_method_slot(llvm::Value** value, int offset);
void process_pc_slot(int offset);
void start_locals();
void process_local_slot(int index, SharkValue** value, int offset);
void end_frame();
private:
static int oopmap_slot_munge(int offset) {
return SharkStack::oopmap_slot_munge(offset);
}
static VMReg slot2reg(int offset) {
return SharkStack::slot2reg(offset);
}
static Location slot2loc(int offset, Location::Type type) {
return Location::new_stk_loc(type, offset * wordSize);
}
static LocationValue* slot2lv(int offset, Location::Type type) {
return new LocationValue(slot2loc(offset, type));
}
static Location::Type location_type(SharkValue** addr, bool maybe_two_word) {
SharkValue *value = *addr;
if (value) {
if (value->is_jobject())
return Location::oop;
#ifdef _LP64
if (value->is_two_word())
return Location::invalid;
#endif // _LP64
return Location::normal;
}
else {
if (maybe_two_word) {
value = *(addr - 1);
if (value && value->is_two_word()) {
#ifdef _LP64
if (value->is_jlong())
return Location::lng;
if (value->is_jdouble())
return Location::dbl;
ShouldNotReachHere();
#else
return Location::normal;
#endif // _LP64
}
}
return Location::invalid;
}
}
protected:
virtual bool stack_slot_needs_write(int index, SharkValue* value) = 0;
virtual bool stack_slot_needs_oopmap(int index, SharkValue* value) = 0;
virtual bool stack_slot_needs_debuginfo(int index, SharkValue* value) = 0;
static Location::Type stack_location_type(int index, SharkValue** addr) {
return location_type(addr, *addr == NULL);
}
protected:
virtual bool local_slot_needs_write(int index, SharkValue* value) = 0;
virtual bool local_slot_needs_oopmap(int index, SharkValue* value) = 0;
virtual bool local_slot_needs_debuginfo(int index, SharkValue* value) = 0;
// Local slots: any local other than the first may be part of a
// two-word value held in the preceding local.
static Location::Type local_location_type(int index, SharkValue** addr) {
return location_type(addr, index > 0);
}
protected:
void write_value_to_frame(llvm::Type* type,
llvm::Value* value,
int offset);
};
class SharkJavaCallDecacher : public SharkDecacher {
public:
SharkJavaCallDecacher(SharkFunction* function, int bci, ciMethod* callee)
: SharkDecacher(function, bci), _callee(callee) {}
private:
ciMethod* _callee;
protected:
ciMethod* callee() const {
return _callee;
}
protected:
bool stack_slot_needs_write(int index, SharkValue* value) {
return value && (index < callee()->arg_size() || value->is_jobject());
}
bool stack_slot_needs_oopmap(int index, SharkValue* value) {
return value && value->is_jobject() && index >= callee()->arg_size();
}
bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
return index >= callee()->arg_size();
}
protected:
bool local_slot_needs_write(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool local_slot_needs_oopmap(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool local_slot_needs_debuginfo(int index, SharkValue* value) {
return true;
}
};
class SharkVMCallDecacher : public SharkDecacher {
public:
SharkVMCallDecacher(SharkFunction* function, int bci)
: SharkDecacher(function, bci) {}
protected:
bool stack_slot_needs_write(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool stack_slot_needs_oopmap(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
return true;
}
protected:
bool local_slot_needs_write(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool local_slot_needs_oopmap(int index, SharkValue* value) {
return value && value->is_jobject();
}
bool local_slot_needs_debuginfo(int index, SharkValue* value) {
return true;
}
};
// Decacher used before trapping out of compiled code: every live slot
// is written back to the frame, object references are mapped, and
// full debug info is emitted for all slots.
class SharkTrapDecacher : public SharkDecacher {
 public:
  SharkTrapDecacher(SharkFunction* function, int bci)
    : SharkDecacher(function, bci) {}

 protected:
  // Stack: write every live slot, map the oops, describe everything.
  bool stack_slot_needs_write(int index, SharkValue* value) {
    return value != NULL;
  }

  bool stack_slot_needs_oopmap(int index, SharkValue* value) {
    return value != NULL && value->is_jobject();
  }

  bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
    return true;
  }

 protected:
  // Locals: same policy as the stack.
  bool local_slot_needs_write(int index, SharkValue* value) {
    return value != NULL;
  }

  bool local_slot_needs_oopmap(int index, SharkValue* value) {
    return value != NULL && value->is_jobject();
  }

  bool local_slot_needs_debuginfo(int index, SharkValue* value) {
    return true;
  }
};
// Base class for cachers: reloads cached state (stack slots, the oop
// temporary, the method slot, locals) from the frame back into
// SharkValues.  Subclasses decide which stack slots need reading; by
// default only object-reference locals are re-read.
class SharkCacher : public SharkCacherDecacher {
protected:
SharkCacher(SharkFunction* function)
: SharkCacherDecacher(function) {}
protected:
void process_stack_slot(int index, SharkValue** value, int offset);
void process_oop_tmp_slot(llvm::Value** value, int offset);
virtual void process_method_slot(llvm::Value** value, int offset);
virtual void process_local_slot(int index, SharkValue** value, int offset);
protected:
// Which stack slots must be reloaded from the frame.
virtual bool stack_slot_needs_read(int index, SharkValue* value) = 0;
protected:
// Default local policy: reload object references only.
virtual bool local_slot_needs_read(int index, SharkValue* value) {
return value && value->is_jobject();
}
protected:
llvm::Value* read_value_from_frame(llvm::Type* type, int offset);
};
class SharkJavaCallCacher : public SharkCacher {
public:
SharkJavaCallCacher(SharkFunction* function, ciMethod* callee)
: SharkCacher(function), _callee(callee) {}
private:
ciMethod* _callee;
protected:
ciMethod* callee() const {
return _callee;
}
protected:
bool stack_slot_needs_read(int index, SharkValue* value) {
return value && (index < callee()->return_type()->size() ||
value->is_jobject());
}
};
class SharkVMCallCacher : public SharkCacher {
public:
SharkVMCallCacher(SharkFunction* function)
: SharkCacher(function) {}
protected:
bool stack_slot_needs_read(int index, SharkValue* value) {
return value && value->is_jobject();
}
};
// Cacher run at function entry: all live-in locals (not just oops)
// are read from the frame, and the method slot is handled specially
// (see process_method_slot).  The entry block has no expression
// stack, so stack_slot_needs_read must never be reached.
class SharkFunctionEntryCacher : public SharkCacher {
 public:
  SharkFunctionEntryCacher(SharkFunction* function, llvm::Value* method)
    : SharkCacher(function), _method(method) {}

 private:
  llvm::Value* _method;

 private:
  llvm::Value* method() const {
    return _method;
  }

 protected:
  void process_method_slot(llvm::Value** value, int offset);

 protected:
  bool stack_slot_needs_read(int index, SharkValue* value) {
    ShouldNotReachHere(); // entry block shouldn't have stack
    // ShouldNotReachHere() may compile to a no-op in product builds;
    // returning here avoids falling off the end of a non-void
    // function (undefined behavior).
    return false;
  }

 protected:
  // Every non-NULL local is read at entry.
  bool local_slot_needs_read(int index, SharkValue* value) {
    return value != NULL;
  }
};
// Cacher for the normal (non-OSR) method entry.  Behaviorally
// identical to SharkFunctionEntryCacher; the distinct type mirrors
// SharkOSREntryCacher.
class SharkNormalEntryCacher : public SharkFunctionEntryCacher {
public:
SharkNormalEntryCacher(SharkFunction* function, llvm::Value* method)
: SharkFunctionEntryCacher(function, method) {}
};
// Cacher for OSR (on-stack replacement) entry: locals and monitors
// are loaded from the interpreter-supplied OSR buffer rather than
// from our own frame.  The buffer is viewed as an intptr_t array
// sized for max_locals() locals plus two words per monitor.
class SharkOSREntryCacher : public SharkFunctionEntryCacher {
public:
SharkOSREntryCacher(SharkFunction* function,
llvm::Value* method,
llvm::Value* osr_buf)
: SharkFunctionEntryCacher(function, method),
_osr_buf(
builder()->CreateBitCast(
osr_buf,
llvm::PointerType::getUnqual(
llvm::ArrayType::get(
SharkType::intptr_type(),
max_locals() + max_monitors() * 2)))) {}
private:
llvm::Value* _osr_buf;
private:
llvm::Value* osr_buf() const {
return _osr_buf;
}
protected:
// Overridden to fill monitors and locals from the OSR buffer.
void process_monitor(int index, int box_offset, int obj_offset);
void process_local_slot(int index, SharkValue** value, int offset);
private:
llvm::Value* CreateAddressOfOSRBufEntry(int offset, llvm::Type* type);
};
#endif // SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkCodeBuffer.hpp
#ifndef SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
#define SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
#include "asm/codeBuffer.hpp"
#include "memory/allocation.hpp"
#include "shark/llvmHeaders.hpp"
// A SharkCodeBuffer is a thin allocation interface over a
// MacroAssembler: Shark does not emit instructions through it, but
// uses the assembler's code space to hold out-of-line data (oops,
// Metadata, raw bytes) addressed as offsets from base_pc.
class SharkCodeBuffer : public StackObj {
public:
SharkCodeBuffer(MacroAssembler* masm)
: _masm(masm), _base_pc(NULL) {}
private:
MacroAssembler* _masm;
llvm::Value* _base_pc;
private:
MacroAssembler* masm() const {
return _masm;
}
public:
// LLVM value holding the runtime base address against which the
// offsets returned below are resolved.
llvm::Value* base_pc() const {
return _base_pc;
}
void set_base_pc(llvm::Value* base_pc) {
assert(_base_pc == NULL, "only do this once");
_base_pc = base_pc;
}
public:
// Allocate 'size' bytes of word-aligned space in the code buffer
// and return a pointer to it.
void* malloc(size_t size) const {
masm()->align(BytesPerWord);
void *result = masm()->pc();
masm()->advance(size);
return result;
}
public:
// Reserve one byte and return its offset, yielding a value that is
// unique within this code buffer.
int create_unique_offset() const {
int offset = masm()->offset();
masm()->advance(1);
return offset;
}
public:
// Store an oop in the code buffer and return its offset.
int inline_oop(jobject object) const {
masm()->align(BytesPerWord);
int offset = masm()->offset();
masm()->store_oop(object);
return offset;
}
// Store a Metadata pointer in the code buffer and return its offset.
int inline_Metadata(Metadata* metadata) const {
masm()->align(BytesPerWord);
int offset = masm()->offset();
masm()->store_Metadata(metadata);
return offset;
}
public:
// Copy 'size' arbitrary bytes from 'src' into the code buffer and
// return the offset of the copy.
int inline_data(void *src, size_t size) const {
masm()->align(BytesPerWord);
int offset = masm()->offset();
void *dst = masm()->pc();
masm()->advance(size);
memcpy(dst, src, size);
return offset;
}
};
#endif // SHARE_VM_SHARK_SHARKCODEBUFFER_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkCompiler.cpp
#include "precompiled.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciMethod.hpp"
#include "code/debugInfoRec.hpp"
#include "code/dependencies.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/oopRecorder.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/oopMap.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkCodeBuffer.hpp"
#include "shark/sharkCompiler.hpp"
#include "shark/sharkContext.hpp"
#include "shark/sharkEntry.hpp"
#include "shark/sharkFunction.hpp"
#include "shark/sharkMemoryManager.hpp"
#include "shark/sharkNativeWrapper.hpp"
#include "shark/shark_globals.hpp"
#include "utilities/debug.hpp"
#include <fnmatch.h>
using namespace llvm;
// File-local LLVM command-line options.  They receive the host CPU
// name and feature list built in the SharkCompiler constructor (via
// cl::ParseCommandLineOptions) and are then fed to the EngineBuilder.
namespace {
cl::opt<std::string>
MCPU("mcpu");
cl::list<std::string>
MAttrs("mattr",
cl::CommaSeparated);
}
// Construct the Shark compiler: initialize LLVM (multithreading and
// the native target), create the two SharkContexts and the memory
// manager, detect the host CPU/features, and build the JIT execution
// engine at the requested optimization level.
SharkCompiler::SharkCompiler()
: AbstractCompiler() {
// Create the lock protecting the memory manager and execution engine
_execution_engine_lock = new Monitor(Mutex::leaf, "SharkExecutionEngineLock");
MutexLocker locker(execution_engine_lock());
// Make LLVM safe for multithreading
if (!llvm_start_multithreaded())
fatal("llvm_start_multithreaded() failed");
// Initialize the native target and its assembly printer
InitializeNativeTarget();
InitializeNativeTargetAsmPrinter();
// Create the two contexts we compile into
_normal_context = new SharkContext("normal");
_native_context = new SharkContext("native");
// Create the memory manager
_memory_manager = new SharkMemoryManager();
// Finetune LLVM for the current host CPU: build a fake argv carrying
// -mcpu/-mattr so the cl:: options above pick them up.
StringMap<bool> Features;
bool gotCpuFeatures = llvm::sys::getHostCPUFeatures(Features);
std::string cpu("-mcpu=" + llvm::sys::getHostCPUName());
std::vector<const char*> args;
args.push_back(""); // program name
args.push_back(cpu.c_str());
std::string mattr("-mattr=");
if(gotCpuFeatures){
for(StringMap<bool>::iterator I = Features.begin(),
E = Features.end(); I != E; ++I){
if(I->second){
std::string attr(I->first());
mattr+="+"+attr+",";
}
}
args.push_back(mattr.c_str());
}
args.push_back(0); // terminator
cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]);
// Create the JIT
std::string ErrorMsg;
EngineBuilder builder(_normal_context->module());
builder.setMCPU(MCPU);
builder.setMAttrs(MAttrs);
builder.setJITMemoryManager(memory_manager());
builder.setEngineKind(EngineKind::JIT);
builder.setErrorStr(&ErrorMsg);
// Select the optimization level from the SharkOptimizationLevel
// flag (fnmatch returns 0 on a match, hence the negations)
if (! fnmatch(SharkOptimizationLevel, "None", 0)) {
tty->print_cr("Shark optimization level set to: None");
builder.setOptLevel(llvm::CodeGenOpt::None);
} else if (! fnmatch(SharkOptimizationLevel, "Less", 0)) {
tty->print_cr("Shark optimization level set to: Less");
builder.setOptLevel(llvm::CodeGenOpt::Less);
} else if (! fnmatch(SharkOptimizationLevel, "Aggressive", 0)) {
tty->print_cr("Shark optimization level set to: Aggressive");
builder.setOptLevel(llvm::CodeGenOpt::Aggressive);
} // else Default is selected by, well, default :-)
_execution_engine = builder.create();
if (!execution_engine()) {
if (!ErrorMsg.empty())
printf("Error while creating Shark JIT: %s\n",ErrorMsg.c_str());
else
printf("Unknown error while creating Shark JIT\n");
exit(1);
}
// Register the native context's module with the engine as well
execution_engine()->addModule(_native_context->module());
set_state(initialized);
}
// AbstractCompiler::initialize() is not used by Shark; all setup
// happens in the constructor.
void SharkCompiler::initialize() {
ShouldNotCallThis();
}
// Compile one method (normal entry, or OSR entry when entry_bci is a
// real bci) to native code via LLVM, then register the result with
// the ciEnv.  Bails out silently if typeflow analysis or IR
// generation fails.
void SharkCompiler::compile_method(ciEnv* env,
ciMethod* target,
int entry_bci) {
assert(is_initialized(), "should be");
ResourceMark rm;
const char *name = methodname(
target->holder()->name()->as_utf8(), target->name()->as_utf8());
// Do the typeflow analysis
ciTypeFlow *flow;
if (entry_bci == InvocationEntryBci)
flow = target->get_flow_analysis();
else
flow = target->get_osr_flow_analysis(entry_bci);
if (flow->failing())
return;
if (SharkPrintTypeflowOf != NULL) {
if (!fnmatch(SharkPrintTypeflowOf, name, 0))
flow->print_on(tty);
}
// Create the recorders
Arena arena;
env->set_oop_recorder(new OopRecorder(&arena));
OopMapSet oopmaps;
env->set_debug_info(new DebugInformationRecorder(env->oop_recorder()));
env->debug_info()->set_oopmaps(&oopmaps);
env->set_dependencies(new Dependencies(env));
// Create the code buffer and builder
CodeBuffer hscb("Shark", 256 * K, 64 * K);
hscb.initialize_oop_recorder(env->oop_recorder());
MacroAssembler *masm = new MacroAssembler(&hscb);
SharkCodeBuffer cb(masm);
SharkBuilder builder(&cb);
// Emit the entry point
SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
// Build the LLVM IR for the method
Function *function = SharkFunction::build(env, &builder, flow, name);
if (env->failing()) {
return;
}
// Generate native code while in VM state (generate_native_code
// takes the execution engine lock).
{
ThreadInVMfromNative tiv(JavaThread::current());
generate_native_code(entry, function, name);
}
// Register the method with the VM
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Deopt, 0);
offsets.set_value(CodeOffsets::Exceptions, 0);
offsets.set_value(CodeOffsets::Verified_Entry,
target->is_static() ? 0 : wordSize);
ExceptionHandlerTable handler_table;
ImplicitExceptionTable inc_table;
env->register_method(target,
entry_bci,
&offsets,
0,
&hscb,
0,
&oopmaps,
&handler_table,
&inc_table,
this,
env->comp_level(),
false,
false);
}
// Build a wrapper for a native method and package it as an nmethod.
// The caller supplies the MacroAssembler whose code buffer receives
// the generated data.
nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm,
methodHandle target,
int compile_id,
BasicType* arg_types,
BasicType return_type) {
assert(is_initialized(), "should be");
ResourceMark rm;
const char *name = methodname(
target->klass_name()->as_utf8(), target->name()->as_utf8());
// Create the code buffer and builder
SharkCodeBuffer cb(masm);
SharkBuilder builder(&cb);
// Emit the entry point
SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
// Build the LLVM IR for the wrapper
SharkNativeWrapper *wrapper = SharkNativeWrapper::build(
&builder, target, name, arg_types, return_type);
// Generate native code
generate_native_code(entry, wrapper->function(), name);
// Package it all up as an nmethod
return nmethod::new_native_nmethod(target,
compile_id,
masm->code(),
0,
0,
wrapper->frame_size(),
wrapper->receiver_offset(),
wrapper->lock_offset(),
wrapper->oop_maps());
}
// Lower an LLVM function to machine code via the execution engine and
// record the entry point, function and context in 'entry'.  Holds the
// execution engine lock while compiling, and drains the free queue
// while it has the lock anyway.
void SharkCompiler::generate_native_code(SharkEntry* entry,
Function* function,
const char* name) {
// Print the LLVM bitcode, if requested
if (SharkPrintBitcodeOf != NULL) {
if (!fnmatch(SharkPrintBitcodeOf, name, 0))
function->dump();
}
// Verify the IR, if requested
if (SharkVerifyFunction != NULL) {
if (!fnmatch(SharkVerifyFunction, name, 0)) {
verifyFunction(*function);
}
}
// Compile to native code
address code = NULL;
context()->add_function(function);
{
MutexLocker locker(execution_engine_lock());
free_queued_methods();
#ifndef NDEBUG
// Older LLVM spells setCurrentDebugType with a capital S
#if SHARK_LLVM_VERSION <= 31
#define setCurrentDebugType SetCurrentDebugType
#endif
// Toggle LLVM's debug output to dump the generated assembly for
// matching methods, if requested
if (SharkPrintAsmOf != NULL) {
if (!fnmatch(SharkPrintAsmOf, name, 0)) {
llvm::setCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit"));
llvm::DebugFlag = true;
}
else {
llvm::setCurrentDebugType("");
llvm::DebugFlag = false;
}
}
#ifdef setCurrentDebugType
#undef setCurrentDebugType
#endif
#endif // !NDEBUG
memory_manager()->set_entry_for_function(function, entry);
code = (address) execution_engine()->getPointerToFunction(function);
}
assert(code != NULL, "code must be != NULL");
entry->set_entry_point(code);
entry->set_function(function);
entry->set_context(context());
address code_start = entry->code_start();
address code_limit = entry->code_limit();
// Register generated code for profiling, etc
if (JvmtiExport::should_post_dynamic_code_generated())
JvmtiExport::post_dynamic_code_generated(name, code_start, code_limit);
// Print statistics, if requested
if (SharkTraceInstalls) {
tty->print_cr(
" [%p-%p): %s (%d bytes code)",
code_start, code_limit, name, code_limit - code_start);
}
}
// Queue the LLVM function backing a compiled method for deletion.
// The actual freeing is deferred to free_queued_methods(), which runs
// under the execution engine lock.
void SharkCompiler::free_compiled_method(address code) {
assert(Thread::current()->is_Compiler_thread(), "must be called by compiler thread");
assert_locked_or_safepoint(CodeCache_lock);
SharkEntry *entry = (SharkEntry *) code;
entry->context()->push_to_free_queue(entry->function());
}
void SharkCompiler::free_queued_methods() {
assert(execution_engine_lock()->owned_by_self(), "should be");
while (true) {
Function *function = context()->pop_from_free_queue();
if (function == NULL)
break;
execution_engine()->freeMachineCodeForFunction(function);
function->eraseFromParent();
}
}
// Build a human-readable "java.lang.Object::hashCode"-style name in a
// resource-allocated buffer: '/' separators in the klass name become
// '.', and klass and method are joined by "::".
//
// NOTE: the previous version had empty if/else and loop bodies (it
// never advanced dst, wrote no separator or terminator, and returned
// an uninitialized buffer); the allocation size
// (strlen(klass) + 2 + strlen(method) + 1) pins the intended format.
const char* SharkCompiler::methodname(const char* klass, const char* method) {
  // klass + "::" + method + '\0'
  char *buf = NEW_RESOURCE_ARRAY(char, strlen(klass) + 2 + strlen(method) + 1);
  char *dst = buf;
  for (const char *c = klass; *c; c++) {
    if (*c == '/')
      *(dst++) = '.';
    else
      *(dst++) = *c;
  }
  *(dst++) = ':';
  *(dst++) = ':';
  for (const char *c = method; *c; c++) {
    *(dst++) = *c;
  }
  *(dst++) = '\0';
  return buf;
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkCompiler.hpp
#ifndef SHARE_VM_SHARK_SHARKCOMPILER_HPP
#define SHARE_VM_SHARK_SHARKCOMPILER_HPP
#include "ci/ciEnv.hpp"
#include "ci/ciMethod.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/sharkMemoryManager.hpp"
class SharkContext;
// The Shark just-in-time compiler: translates bytecode to native code
// through LLVM.
class SharkCompiler : public AbstractCompiler {
public:
// Creation
SharkCompiler();
// Name of this compiler
const char *name() { return "shark"; }
// Feature tests
bool supports_native() { return true; }
bool supports_osr() { return true; }
// Method-handle intrinsics and compiled lambda forms are the only
// methods Shark refuses to compile.
bool can_compile_method(methodHandle method) {
return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
}
// Initialization
void initialize();
// Compilation entry points
void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
nmethod* generate_native_wrapper(MacroAssembler* masm,
methodHandle target,
int compile_id,
BasicType* arg_types,
BasicType return_type);
// Free compiled methods (queues them; see free_queued_methods)
void free_compiled_method(address code);
private:
// Normal compiles and native-wrapper generation run on different
// threads, so each gets its own LLVM context.
SharkContext* _normal_context;
SharkContext* _native_context;
public:
// Pick the context for the calling thread: compiler threads get the
// normal one; wrapper generation (under AdapterHandlerLibrary_lock)
// gets the native one.
SharkContext* context() const {
if (JavaThread::current()->is_Compiler_thread()) {
return _normal_context;
}
else {
assert(AdapterHandlerLibrary_lock->owned_by_self(), "should be");
return _native_context;
}
}
private:
// The execution engine and memory manager are shared and guarded by
// _execution_engine_lock.
Monitor* _execution_engine_lock;
SharkMemoryManager* _memory_manager;
llvm::ExecutionEngine* _execution_engine;
private:
Monitor* execution_engine_lock() const {
return _execution_engine_lock;
}
// Accessors below may only be used with the lock held.
SharkMemoryManager* memory_manager() const {
assert(execution_engine_lock()->owned_by_self(), "should be");
return _memory_manager;
}
llvm::ExecutionEngine* execution_engine() const {
assert(execution_engine_lock()->owned_by_self(), "should be");
return _execution_engine;
}
public:
// Global access: Shark is registered as the compiler for
// CompLevel_full_optimization.
static SharkCompiler* compiler() {
AbstractCompiler *compiler =
CompileBroker::compiler(CompLevel_full_optimization);
assert(compiler->is_shark() && compiler->is_initialized(), "should be");
return (SharkCompiler *) compiler;
}
private:
// Helpers
static const char* methodname(const char* klass, const char* method);
void generate_native_code(SharkEntry* entry,
llvm::Function* function,
const char* name);
void free_queued_methods();
};
#endif // SHARE_VM_SHARK_SHARKCOMPILER_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkConstant.cpp
#include "precompiled.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciStreams.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkConstant.hpp"
#include "shark/sharkValue.hpp"
using namespace llvm;
// Create a SharkConstant for the constant-pool entry referenced by
// the ldc/ldc_w/ldc2_w at the iterator's current position.
SharkConstant* SharkConstant::for_ldc(ciBytecodeStream *iter) {
ciConstant constant = iter->get_constant();
ciType *type = NULL;
if (constant.basic_type() == T_OBJECT) {
ciEnv *env = ciEnv::current();
// ldc of an object is limited to String, Class, MethodType and
// MethodHandle constants.
assert(constant.as_object()->klass() == env->String_klass()
|| constant.as_object()->klass() == env->Class_klass()
|| constant.as_object()->klass()->is_subtype_of(env->MethodType_klass())
|| constant.as_object()->klass()->is_subtype_of(env->MethodHandle_klass()), "should be");
type = constant.as_object()->klass();
}
return new SharkConstant(constant, type);
}
// Create a SharkConstant describing the constant value of the field
// referenced at the iterator's current bytecode.
SharkConstant* SharkConstant::for_field(ciBytecodeStream *iter) {
  bool is_linked;
  ciField *the_field = iter->get_field(is_linked);
  assert(is_linked, "typeflow responsibility");
  ciConstant field_value = the_field->constant_value();
  return new SharkConstant(field_value, the_field->type());
}
// Build a SharkConstant from a ciConstant.  Primitive constants are
// materialized immediately as SharkValues; object constants are kept
// as ciObjects and materialized lazily (see value()).  Constants that
// cannot be used are marked not-loaded.
SharkConstant::SharkConstant(ciConstant constant, ciType *type) {
SharkValue *value = NULL;
switch (constant.basic_type()) {
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_SHORT:
case T_INT:
// Sub-int types are widened to jint on the expression stack.
value = SharkValue::jint_constant(constant.as_int());
break;
case T_LONG:
value = SharkValue::jlong_constant(constant.as_long());
break;
case T_FLOAT:
value = SharkValue::jfloat_constant(constant.as_float());
break;
case T_DOUBLE:
value = SharkValue::jdouble_constant(constant.as_double());
break;
case T_OBJECT:
case T_ARRAY:
// Handled below, after the primitive path returns.
break;
case T_ILLEGAL:
// Unusable constant-pool entry.
_is_loaded = false;
return;
default:
tty->print_cr("Unhandled type %s", type2name(constant.basic_type()));
ShouldNotReachHere();
}
// Primitive constants are complete at this point.
if (value) {
_value = value;
_is_loaded = true;
_is_nonzero = value->zero_checked();
_is_two_word = value->is_two_word();
return;
}
// Object constants.
ciObject *object = constant.as_object();
assert(type != NULL, "shouldn't be");
// NOTE(review): this compares the constant's klass to Class_klass
// and then re-reads object->klass() (i.e. Class_klass itself) for
// the is_loaded() check -- presumably intended to guard unloaded
// class mirrors; confirm against upstream.
if ((! object->is_null_object()) && object->klass() == ciEnv::current()->Class_klass()) {
ciKlass *klass = object->klass();
if (! klass->is_loaded()) {
_is_loaded = false;
return;
}
}
// Null, non-constant or unloaded objects cannot be embedded.
if (object->is_null_object() || ! object->can_be_constant() || ! object->is_loaded()) {
_is_loaded = false;
return;
}
// Defer materialization; value() creates the SharkValue on demand.
_value = NULL;
_object = object;
_type = type;
_is_loaded = true;
_is_nonzero = true;
_is_two_word = false;
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkConstant.hpp
#ifndef SHARE_VM_SHARK_SHARKCONSTANT_HPP
#define SHARE_VM_SHARK_SHARKCONSTANT_HPP
#include "ci/ciStreams.hpp"
#include "memory/allocation.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkValue.hpp"
// A SharkConstant represents a constant-pool constant (for_ldc) or a
// constant field value (for_field).  Primitive constants carry a
// ready-made SharkValue; object constants are materialized lazily by
// value().
class SharkConstant : public ResourceObj {
public:
static SharkConstant* for_ldc(ciBytecodeStream* iter);
static SharkConstant* for_field(ciBytecodeStream* iter);
private:
SharkConstant(ciConstant constant, ciType* type);
private:
SharkValue* _value;   // non-NULL once materialized
ciObject* _object;    // the object, for lazy object constants
ciType* _type;        // static type used when materializing _object
bool _is_loaded;
bool _is_nonzero;
bool _is_two_word;
public:
bool is_loaded() const {
return _is_loaded;
}
// The two queries below are only meaningful for loaded constants.
bool is_nonzero() const {
assert(is_loaded(), "should be");
return _is_nonzero;
}
bool is_two_word() const {
assert(is_loaded(), "should be");
return _is_two_word;
}
public:
// Materialize the constant as a SharkValue; object constants create
// an inline-oop reference on first use and cache the result.
SharkValue* value(SharkBuilder* builder) {
assert(is_loaded(), "should be");
if (_value == NULL) {
_value = SharkValue::create_generic(
_type, builder->CreateInlineOop(_object), _is_nonzero);
}
return _value;
}
};
#endif // SHARE_VM_SHARK_SHARKCONSTANT_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkContext.cpp
#include "precompiled.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/sharkContext.hpp"
#include "utilities/globalDefinitions.hpp"
#include "memory/allocation.hpp"
using namespace llvm;
// Construct a SharkContext: create its module and pre-build the LLVM
// types and the BasicType-to-LLVM-type mapping tables used throughout
// Shark.
SharkContext::SharkContext(const char* name)
: LLVMContext(),
_free_queue(NULL) {
// Create a module to build our functions into
_module = new Module(name, *this);
// Create basic types
_void_type = Type::getVoidTy(*this);
_bit_type = Type::getInt1Ty(*this);
_jbyte_type = Type::getInt8Ty(*this);
_jshort_type = Type::getInt16Ty(*this);
_jint_type = Type::getInt32Ty(*this);
_jlong_type = Type::getInt64Ty(*this);
_jfloat_type = Type::getFloatTy(*this);
_jdouble_type = Type::getDoubleTy(*this);
// Create compound types: VM structures are modelled as pointers to
// opaque byte arrays of the right size, accessed by offset.
_itableOffsetEntry_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), itableOffsetEntry::size() * wordSize));
_Metadata_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(Metadata)));
_klass_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(Klass)));
_jniEnv_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(JNIEnv)));
_jniHandleBlock_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(JNIHandleBlock)));
_Method_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(Method)));
_monitor_type = ArrayType::get(
jbyte_type(), frame::interpreter_frame_monitor_size() * wordSize);
_oop_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(oopDesc)));
_thread_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(JavaThread)));
_zeroStack_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(ZeroStack)));
// Entry point signature: jint (Method*, intptr_t, JavaThread*)
std::vector<Type*> params;
params.push_back(Method_type());
params.push_back(intptr_type());
params.push_back(thread_type());
_entry_point_type = FunctionType::get(jint_type(), params, false);
// OSR entry point signature: as above plus a jbyte* buffer argument
params.clear();
params.push_back(Method_type());
params.push_back(PointerType::getUnqual(jbyte_type()));
params.push_back(intptr_type());
params.push_back(thread_type());
_osr_entry_point_type = FunctionType::get(jint_type(), params, false);
// Create the BasicType -> LLVM type mappings for stack slots and
// array elements.  Sub-int types widen to jint on the stack but
// keep their natural width in arrays.
for (int i = 0; i < T_CONFLICT; i++) {
switch (i) {
case T_BOOLEAN:
_to_stackType[i] = jint_type();
_to_arrayType[i] = jbyte_type();
break;
case T_BYTE:
_to_stackType[i] = jint_type();
_to_arrayType[i] = jbyte_type();
break;
case T_CHAR:
_to_stackType[i] = jint_type();
_to_arrayType[i] = jshort_type();
break;
case T_SHORT:
_to_stackType[i] = jint_type();
_to_arrayType[i] = jshort_type();
break;
case T_INT:
_to_stackType[i] = jint_type();
_to_arrayType[i] = jint_type();
break;
case T_LONG:
_to_stackType[i] = jlong_type();
_to_arrayType[i] = jlong_type();
break;
case T_FLOAT:
_to_stackType[i] = jfloat_type();
_to_arrayType[i] = jfloat_type();
break;
case T_DOUBLE:
_to_stackType[i] = jdouble_type();
_to_arrayType[i] = jdouble_type();
break;
case T_OBJECT:
case T_ARRAY:
_to_stackType[i] = oop_type();
_to_arrayType[i] = oop_type();
break;
case T_ADDRESS:
_to_stackType[i] = intptr_type();
_to_arrayType[i] = NULL;
break;
default:
// Unmapped types; map_type() asserts if they are requested.
_to_stackType[i] = NULL;
_to_arrayType[i] = NULL;
}
}
}
// A node in SharkContext's singly-linked queue of LLVM functions
// awaiting deletion (see push_to_free_queue / pop_from_free_queue).
class SharkFreeQueueItem : public CHeapObj<mtNone> {
public:
SharkFreeQueueItem(llvm::Function* function, SharkFreeQueueItem *next)
: _function(function), _next(next) {}
private:
llvm::Function* _function;
SharkFreeQueueItem* _next;
public:
llvm::Function* function() const {
return _function;
}
SharkFreeQueueItem* next() const {
return _next;
}
};
// Prepend 'function' to the free queue (LIFO).
void SharkContext::push_to_free_queue(Function* function) {
_free_queue = new SharkFreeQueueItem(function, _free_queue);
}
// Pop and return the next function awaiting deletion, or NULL when
// the queue is empty.  The queue node itself is freed here.
Function* SharkContext::pop_from_free_queue() {
  SharkFreeQueueItem *head = _free_queue;
  if (head == NULL) {
    return NULL;
  }
  _free_queue = head->next();
  Function *result = head->function();
  delete head;
  return result;
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkContext.hpp
#ifndef SHARE_VM_SHARK_SHARKCONTEXT_HPP
#define SHARE_VM_SHARK_SHARKCONTEXT_HPP
#include "shark/llvmHeaders.hpp"
#include "shark/sharkCompiler.hpp"
class SharkFreeQueueItem;
// A SharkContext is an LLVMContext plus everything Shark builds on
// top of it: the module, the pre-built LLVM types for Java and VM
// values, the entry-point signatures, the BasicType mapping tables
// and the queue of functions awaiting deletion.
class SharkContext : public llvm::LLVMContext {
public:
SharkContext(const char* name);
private:
llvm::Module* _module;
public:
llvm::Module* module() const {
return _module;
}
public:
// The context for the calling thread, via SharkCompiler::context().
static SharkContext& current() {
return *SharkCompiler::compiler()->context();
}
public:
// Append a function to this context's module.
void add_function(llvm::Function* function) const {
module()->getFunctionList().push_back(function);
}
// Declare (or find) an external function with the given signature.
llvm::Constant* get_external(const char* name,
llvm::FunctionType* sig) {
return module()->getOrInsertFunction(name, sig);
}
private:
// Basic types, created once in the constructor
llvm::Type* _void_type;
llvm::IntegerType* _bit_type;
llvm::IntegerType* _jbyte_type;
llvm::IntegerType* _jshort_type;
llvm::IntegerType* _jint_type;
llvm::IntegerType* _jlong_type;
llvm::Type* _jfloat_type;
llvm::Type* _jdouble_type;
public:
llvm::Type* void_type() const {
return _void_type;
}
llvm::IntegerType* bit_type() const {
return _bit_type;
}
llvm::IntegerType* jbyte_type() const {
return _jbyte_type;
}
llvm::IntegerType* jshort_type() const {
return _jshort_type;
}
llvm::IntegerType* jint_type() const {
return _jint_type;
}
llvm::IntegerType* jlong_type() const {
return _jlong_type;
}
llvm::Type* jfloat_type() const {
return _jfloat_type;
}
llvm::Type* jdouble_type() const {
return _jdouble_type;
}
// Pointer-sized integer: jlong on LP64, jint on 32-bit.
llvm::IntegerType* intptr_type() const {
return LP64_ONLY(jlong_type()) NOT_LP64(jint_type());
}
private:
// Compound (VM structure) types and entry-point signatures
llvm::PointerType* _itableOffsetEntry_type;
llvm::PointerType* _jniEnv_type;
llvm::PointerType* _jniHandleBlock_type;
llvm::PointerType* _Metadata_type;
llvm::PointerType* _klass_type;
llvm::PointerType* _Method_type;
llvm::ArrayType* _monitor_type;
llvm::PointerType* _oop_type;
llvm::PointerType* _thread_type;
llvm::PointerType* _zeroStack_type;
llvm::FunctionType* _entry_point_type;
llvm::FunctionType* _osr_entry_point_type;
public:
llvm::PointerType* itableOffsetEntry_type() const {
return _itableOffsetEntry_type;
}
llvm::PointerType* jniEnv_type() const {
return _jniEnv_type;
}
llvm::PointerType* jniHandleBlock_type() const {
return _jniHandleBlock_type;
}
llvm::PointerType* Metadata_type() const {
return _Metadata_type;
}
llvm::PointerType* klass_type() const {
return _klass_type;
}
llvm::PointerType* Method_type() const {
return _Method_type;
}
llvm::ArrayType* monitor_type() const {
return _monitor_type;
}
llvm::PointerType* oop_type() const {
return _oop_type;
}
llvm::PointerType* thread_type() const {
return _thread_type;
}
llvm::PointerType* zeroStack_type() const {
return _zeroStack_type;
}
llvm::FunctionType* entry_point_type() const {
return _entry_point_type;
}
llvm::FunctionType* osr_entry_point_type() const {
return _osr_entry_point_type;
}
private:
// BasicType -> LLVM type tables, filled in by the constructor
llvm::Type* _to_stackType[T_CONFLICT];
llvm::Type* _to_arrayType[T_CONFLICT];
private:
llvm::Type* map_type(llvm::Type* const* table,
BasicType type) const {
assert(type >= 0 && type < T_CONFLICT, "unhandled type");
llvm::Type* result = table[type];
assert(result != NULL, "unhandled type");
return result;
}
public:
// LLVM type for 'type' as it appears on the expression stack.
llvm::Type* to_stackType(BasicType type) const {
return map_type(_to_stackType, type);
}
// LLVM type for 'type' as it appears as an array element.
llvm::Type* to_arrayType(BasicType type) const {
return map_type(_to_arrayType, type);
}
private:
// Queue of functions waiting to be deleted (see SharkFreeQueueItem).
SharkFreeQueueItem* _free_queue;
public:
void push_to_free_queue(llvm::Function* function);
llvm::Function* pop_from_free_queue();
};
#endif // SHARE_VM_SHARK_SHARKCONTEXT_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkEntry.hpp
#ifndef SHARE_VM_SHARK_SHARKENTRY_HPP
#define SHARE_VM_SHARK_SHARKENTRY_HPP
#include "shark/llvmHeaders.hpp"
class SharkContext;
// Shark's extension of the ZeroEntry: in addition to the entry point
// it records the end of the generated code, the context the function
// was compiled in, and the LLVM function itself (needed so the method
// can be freed later).
class SharkEntry : public ZeroEntry {
private:
address _code_limit;
SharkContext* _context;
llvm::Function* _function;
public:
// Code starts at the entry point...
address code_start() const {
return entry_point();
}
// ...and runs up to (but not including) _code_limit.
address code_limit() const {
return _code_limit;
}
SharkContext* context() const {
return _context;
}
llvm::Function* function() const {
return _function;
}
public:
void set_code_limit(address code_limit) {
_code_limit = code_limit;
}
void set_context(SharkContext* context) {
_context = context;
}
void set_function(llvm::Function* function) {
_function = function;
}
};
#endif // SHARE_VM_SHARK_SHARKENTRY_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkFunction.cpp
#include "precompiled.hpp"
#include "ci/ciTypeFlow.hpp"
#include "memory/allocation.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkEntry.hpp"
#include "shark/sharkFunction.hpp"
#include "shark/sharkState.hpp"
#include "shark/sharkTopLevelBlock.hpp"
#include "shark/shark_globals.hpp"
#include "utilities/debug.hpp"
using namespace llvm;
// Build the complete LLVM IR for the method: create the function and
// name its arguments, set up the top-level blocks, push the frame,
// construct the entry state (normal or OSR, locking if synchronized)
// and emit the IR for every reachable block.
void SharkFunction::initialize(const char *name) {
// Create the function.
// (entry_point_type() is expected to yield the OSR signature, with
// the extra buffer argument, for OSR compiles -- the argument
// iterator below relies on that; verify in sharkFunction.hpp.)
_function = Function::Create(
entry_point_type(),
GlobalVariable::InternalLinkage,
name);
// Get our arguments
Function::arg_iterator ai = function()->arg_begin();
Argument *method = ai++;
method->setName("method");
Argument *osr_buf = NULL;
if (is_osr()) {
osr_buf = ai++;
osr_buf->setName("osr_buf");
}
Argument *base_pc = ai++;
base_pc->setName("base_pc");
code_buffer()->set_base_pc(base_pc);
Argument *thread = ai++;
thread->setName("thread");
set_thread(thread);
// Create the top-level blocks, one per typeflow block
set_block_insertion_point(NULL);
_blocks = NEW_RESOURCE_ARRAY(SharkTopLevelBlock*, block_count());
for (int i = 0; i < block_count(); i++) {
ciTypeFlow::Block *b = flow()->pre_order_at(i);
// Index by the block's own pre_order, which need not equal i
_blocks[b->pre_order()] = new SharkTopLevelBlock(this, b);
}
// Walk the graph from the start block to find the entered blocks
SharkTopLevelBlock *start_block = block(flow()->start_block_num());
if (is_osr() && start_block->stack_depth_at_entry() != 0) {
env()->record_method_not_compilable("can't compile OSR block with incoming stack-depth > 0");
return;
}
assert(start_block->start() == flow()->start_bci(), "blocks out of order");
start_block->enter();
// Initialize all entered blocks
for (int i = 0; i < block_count(); i++) {
if (block(i)->entered())
block(i)->initialize();
}
// Create and push our stack frame
set_block_insertion_point(&function()->front());
builder()->SetInsertPoint(CreateBlock());
_stack = SharkStack::CreateBuildAndPushFrame(this, method);
// Create the entry state
SharkState *entry_state;
if (is_osr()) {
entry_state = new SharkOSREntryState(start_block, method, osr_buf);
// Free the OSR buffer
builder()->CreateCall(builder()->osr_migration_end(), osr_buf);
}
else {
entry_state = new SharkNormalEntryState(start_block, method);
// Lock if necessary
if (is_synchronized()) {
SharkTopLevelBlock *locker =
new SharkTopLevelBlock(this, start_block->ciblock());
locker->add_incoming(entry_state);
set_block_insertion_point(start_block->entry_block());
locker->acquire_method_lock();
entry_state = locker->current_state();
}
}
// Transfer control to the start block
start_block->add_incoming(entry_state);
builder()->CreateBr(start_block->entry_block());
// Emit the IR for every entered block, in order
for (int i = 0; i < block_count(); i++) {
if (!block(i)->entered())
continue;
if (i + 1 < block_count())
set_block_insertion_point(block(i + 1)->entry_block());
else
set_block_insertion_point(NULL);
block(i)->emit_IR();
}
do_deferred_zero_checks();
}
// A deferred zero check.  Construction captures the current block,
// value, bci and a copy of the JVM state, then diverts the builder to
// a fresh "not_zero" continuation block; process() later returns to
// the captured insertion point and emits the actual check (see
// SharkTopLevelBlock::do_deferred_zero_check).
class DeferredZeroCheck : public SharkTargetInvariants {
public:
DeferredZeroCheck(SharkTopLevelBlock* block, SharkValue* value)
: SharkTargetInvariants(block),
_block(block),
_value(value),
_bci(block->bci()),
_state(block->current_state()->copy()),
_check_block(builder()->GetInsertBlock()),
_continue_block(function()->CreateBlock("not_zero")) {
// Subsequent code is emitted into the continuation block
builder()->SetInsertPoint(continue_block());
}
private:
SharkTopLevelBlock* _block;
SharkValue* _value;
int _bci;
SharkState* _state;
BasicBlock* _check_block;
BasicBlock* _continue_block;
public:
SharkTopLevelBlock* block() const {
return _block;
}
SharkValue* value() const {
return _value;
}
int bci() const {
return _bci;
}
SharkState* state() const {
return _state;
}
BasicBlock* check_block() const {
return _check_block;
}
BasicBlock* continue_block() const {
return _continue_block;
}
public:
SharkFunction* function() const {
return block()->function();
}
public:
// Emit the check at the point where it was originally deferred.
void process() const {
builder()->SetInsertPoint(check_block());
block()->do_deferred_zero_check(value(), bci(), state(), continue_block());
}
};
// Record a zero/null check whose slow path will be emitted later,
// after all blocks have been generated (see do_deferred_zero_checks).
void SharkFunction::add_deferred_zero_check(SharkTopLevelBlock* block,
                                            SharkValue*         value) {
  DeferredZeroCheck* check = new DeferredZeroCheck(block, value);
  deferred_zero_checks()->append(check);
}
void SharkFunction::do_deferred_zero_checks() {
for (int i = 0; i < deferred_zero_checks()->length(); i++)
deferred_zero_checks()->at(i)->process();
}
C:\hotspot-69087d08d473\src\share\vm/shark/sharkFunction.hpp
#ifndef SHARE_VM_SHARK_SHARKFUNCTION_HPP
#define SHARE_VM_SHARK_SHARKFUNCTION_HPP
#include "ci/ciEnv.hpp"
#include "ci/ciStreams.hpp"
#include "ci/ciTypeFlow.hpp"
#include "memory/allocation.hpp"
#include "shark/llvmHeaders.hpp"
#include "shark/llvmValue.hpp"
#include "shark/sharkBuilder.hpp"
#include "shark/sharkContext.hpp"
#include "shark/sharkInvariants.hpp"
#include "shark/sharkStack.hpp"
class SharkTopLevelBlock;
class DeferredZeroCheck;
// SharkFunction drives the translation of one Java method (normal or
// OSR entry) into an LLVM function, using the method's ciTypeFlow to
// partition it into SharkTopLevelBlocks.  It also owns the queue of
// deferred zero checks emitted during block generation.
class SharkFunction : public SharkTargetInvariants {
 friend class SharkStackWithNormalFrame;

 public:
  // Builds and returns the complete LLVM function for the given
  // method flow.  The SharkFunction itself is a temporary; only the
  // generated llvm::Function survives.
  static llvm::Function* build(ciEnv*        env,
                               SharkBuilder* builder,
                               ciTypeFlow*   flow,
                               const char*   name) {
    SharkFunction function(env, builder, flow, name);
    return function.function();
  }

 private:
  SharkFunction(ciEnv*        env,
                SharkBuilder* builder,
                ciTypeFlow*   flow,
                const char*   name)
    : SharkTargetInvariants(env, builder, flow) { initialize(name); }

 private:
  void initialize(const char* name);

 private:
  llvm::Function*                    _function;              // the generated code
  SharkTopLevelBlock**               _blocks;                // one per flow block
  GrowableArray<DeferredZeroCheck*>  _deferred_zero_checks;  // pending slow paths
  SharkStack*                        _stack;                 // this method's frame

 public:
  llvm::Function* function() const {
    return _function;
  }
  int block_count() const {
    return flow()->block_count();
  }
  SharkTopLevelBlock* block(int i) const {
    // Check both bounds: a negative index would otherwise read
    // _blocks[i] out of range before any check fired.
    assert(i >= 0 && i < block_count(), "should be");
    return _blocks[i];
  }
  GrowableArray<DeferredZeroCheck*>* deferred_zero_checks() {
    return &_deferred_zero_checks;
  }
  SharkStack* stack() const {
    return _stack;
  }

  // On-stack replacement
 private:
  bool is_osr() const {
    return flow()->is_osr_flow();
  }
  // OSR entries take an extra osr_buf argument, hence the distinct type.
  llvm::FunctionType* entry_point_type() const {
    if (is_osr())
      return SharkType::osr_entry_point_type();
    else
      return SharkType::entry_point_type();
  }

  // Block management
 private:
  llvm::BasicBlock* _block_insertion_point;

  void set_block_insertion_point(llvm::BasicBlock* block_insertion_point) {
    _block_insertion_point = block_insertion_point;
  }
  llvm::BasicBlock* block_insertion_point() const {
    return _block_insertion_point;
  }

 public:
  // Creates a new basic block before the current insertion point
  // (appends at the end when the insertion point is NULL).
  llvm::BasicBlock* CreateBlock(const char* name = "") const {
    return llvm::BasicBlock::Create(
      SharkContext::current(), name, function(), block_insertion_point());
  }

  // Deferred zero checks
 public:
  void add_deferred_zero_check(SharkTopLevelBlock* block,
                               SharkValue*         value);

 private:
  void do_deferred_zero_checks();
};
#endif // SHARE_VM_SHARK_SHARKFUNCTION_HPP
C:\hotspot-69087d08d473\src\share\vm/shark/sharkInliner.cpp
#include "precompiled.hpp"
#include "ci/ciField.hpp"
#include "ci/ciMethod.hpp"
#include "ci/ciStreams.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "shark/sharkBlock.hpp"
#include "shark/sharkConstant.hpp"
#include "shark/sharkInliner.hpp"
#include "shark/sharkIntrinsics.hpp"
#include "shark/sharkState.hpp"
#include "shark/sharkValue.hpp"
#include "shark/shark_globals.hpp"
using namespace llvm;
// A SharkBlock used to parse an inlined callee's bytecodes directly
// into the caller's IR.  Construction transfers the call's actual
// arguments from the caller's state into the callee's local slots.
class SharkInlineBlock : public SharkBlock {
 public:
  SharkInlineBlock(ciMethod* target, SharkState* state)
    : SharkBlock(state, target),
      _outer_state(state),
      _entry_state(new SharkState(this)) {
    // Walk the locals from the top down so the first pops receive the
    // rightmost arguments, matching the caller's operand-stack order.
    // Local slots beyond the argument slots start out with no value.
    for (int slot = target->max_locals() - 1; slot >= 0; slot--) {
      SharkValue* incoming = NULL;
      if (slot < target->arg_size())
        incoming = outer_state()->pop();
      entry_state()->set_local(slot, incoming);
    }
  }

 private:
  SharkState* _outer_state;  // the caller's state
  SharkState* _entry_state;  // the callee's state on entry

 private:
  SharkState* outer_state() {
    return _outer_state;
  }
  SharkState* entry_state() {
    return _entry_state;
  }

 public:
  // Parse the whole inlined method at the current insert point.
  void emit_IR() {
    parse_bytecode(0, target()->code_size());
  }

 private:
  // Returns from the inlined method push their value back onto the
  // caller's stack instead of emitting an actual return.
  void do_return(BasicType type) {
    if (type == T_VOID)
      return;
    SharkValue* result = pop_result(type);
    outer_state()->push(result);
    if (result->is_two_word())
      outer_state()->push(NULL);
  }
};
// Helper that decides whether a method can be inlined by abstractly
// interpreting its bytecodes.  Each local and stack slot is modelled
// by a single bool which, judging from the pushes in is_inlinable
// (e.g. push(sc->is_nonzero()), push(true) for non-zero constants),
// tracks whether the slot is known to hold a non-zero value.
class SharkInlinerHelper : public StackObj {
 public:
  SharkInlinerHelper(ciMethod* target, SharkState* entry_state)
    : _target(target),
      _entry_state(entry_state),
      _iter(target) {}

 private:
  ciBytecodeStream _iter;         // walks the target's bytecodes
  SharkState*      _entry_state;  // caller state at the call site
  ciMethod*        _target;       // the candidate callee

 public:
  ciBytecodeStream* iter() {
    return &_iter;
  }
  SharkState* entry_state() const {
    return _entry_state;
  }
  ciMethod* target() const {
    return _target;
  }

 public:
  Bytecodes::Code bc() {
    return iter()->cur_bc();
  }
  int max_locals() const {
    return target()->max_locals();
  }
  int max_stack() const {
    return target()->max_stack();
  }

  // Inlinability check
 public:
  bool is_inlinable();

 private:
  void initialize_for_check();

  bool do_getstatic() {
    return do_field_access(true, false);
  }
  bool do_getfield() {
    return do_field_access(true, true);
  }
  bool do_putfield() {
    return do_field_access(false, true);
  }
  bool do_field_access(bool is_get, bool is_field);

  // Abstract local variables for the inlinability check
 private:
  bool* _locals;

 public:
  bool* local_addr(int index) const {
    assert(index >= 0 && index < max_locals(), "bad local variable index");
    return &_locals[index];
  }
  bool local(int index) const {
    return *local_addr(index);
  }
  void set_local(int index, bool value) {
    // Fix: the store was missing entirely, so writes to locals were
    // silently dropped and later local() reads returned stale values.
    *local_addr(index) = value;
  }

  // Abstract expression stack for the inlinability check
 private:
  bool* _stack;  // base of the stack
  bool* _sp;     // one past the top element

 public:
  int stack_depth() const {
    return _sp - _stack;
  }
  bool* stack_addr(int slot) const {
    assert(slot >= 0 && slot < stack_depth(), "bad stack slot");
    return &_sp[-(slot + 1)];
  }
  void push(bool value) {
    assert(stack_depth() < max_stack(), "stack overrun");
    // Fix: the push was a no-op (_sp never advanced and the value was
    // never stored), so stack_depth() stayed at zero and any pop()
    // would underrun.
    *(_sp++) = value;
  }
  bool pop() {
    assert(stack_depth() > 0, "stack underrun");
    return *(--_sp);
  }

 public:
  void push_pair_local(int index) {
    push(local(index));
    push(local(index + 1));
  }
  void pop_pair_local(int index) {
    set_local(index + 1, pop());
    set_local(index, pop());
  }

  // Code generation
 public:
  void do_inline() {
    (new SharkInlineBlock(target(), entry_state()))->emit_IR();
  }
};
// Quick structural screen: returns true only when the target has none
// of the properties that rule out inlining outright (native, abstract,
// too large, synchronization, exception handlers/jsrs, constructors).
// The || chain preserves the original's check order and short-circuit.
bool SharkInliner::may_be_inlinable(ciMethod *target) {
  bool rejected =
      target->is_native()                                    ||
      target->is_abstract()                                  ||
      target->code_size() > SharkMaxInlineSize               ||
      target->is_synchronized()                              ||
      target->has_monitor_bytecodes()                        ||
      target->has_exception_handlers()                       ||
      target->has_jsrs()                                     ||
      target->is_initializer()                               ||
      target->intrinsic_id() == vmIntrinsics::_Object_init;
  return !rejected;
}
bool SharkInlinerHelper::is_inlinable() {
ResourceMark rm;
initialize_for_check();
SharkConstant *sc;
bool a, b, c, d;
iter()->reset_to_bci(0);
while (iter()->next() != ciBytecodeStream::EOBC()) {
switch (bc()) {
case Bytecodes::_nop:
break;
case Bytecodes::_aconst_null:
push(false);
break;
case Bytecodes::_iconst_0:
push(false);
break;
case Bytecodes::_iconst_m1:
case Bytecodes::_iconst_1:
case Bytecodes::_iconst_2:
case Bytecodes::_iconst_3:
case Bytecodes::_iconst_4:
case Bytecodes::_iconst_5:
push(true);
break;
case Bytecodes::_lconst_0:
push(false);
push(false);
break;
case Bytecodes::_lconst_1:
push(true);
push(false);
break;
case Bytecodes::_fconst_0:
case Bytecodes::_fconst_1:
case Bytecodes::_fconst_2:
push(false);
break;
case Bytecodes::_dconst_0:
case Bytecodes::_dconst_1:
push(false);
push(false);
break;
case Bytecodes::_bipush:
push(iter()->get_constant_u1() != 0);
break;
case Bytecodes::_sipush:
push(iter()->get_constant_u2() != 0);
break;
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
case Bytecodes::_ldc2_w:
sc = SharkConstant::for_ldc(iter());
if (!sc->is_loaded())
return false;
push(sc->is_nonzero());
if (sc->is_two_word())
push(false);
break;
case Bytecodes::_iload_0:
case Bytecodes::_fload_0:
case Bytecodes::_aload_0:
push(local(0));
break;
      // NOTE(review): non-source scrape residue removed here; the body of
      // SharkInlinerHelper::is_inlinable continues in the original file.