// Set the carry bit on a counter that is more than half way to wrapping,
// so the "overflowed" state survives counter decay.
void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
  if (counter->carry()) {
    return; // carry already recorded
  }
  if (counter->count() > InvocationCounter::count_limit / 2) {
    counter->set_carry_flag();
  }
}
// Preserve counter state for a method whose counters are about to wrap:
// mark invocation and backedge counters both in the MethodCounters and
// in the MDO (when present).
void SimpleThresholdPolicy::handle_counter_overflow(Method* method) {
  MethodCounters* counters = method->method_counters();
  if (counters != NULL) {
    set_carry_if_necessary(counters->invocation_counter());
    set_carry_if_necessary(counters->backedge_counter());
  }
  MethodData* method_data = method->method_data();
  if (method_data != NULL) {
    set_carry_if_necessary(method_data->invocation_counter());
    set_carry_if_necessary(method_data->backedge_counter());
  }
}
// Simple policy: serve the compile queue strictly in FIFO order.
CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask* task = compile_queue->first();
  return task;
}
// Reprofile request (e.g. after deoptimization): walk the scope chain from
// the trapping scope out to the top-most scope and reset the MDO start
// counters of every method on the way so profiling begins anew.
void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      methodHandle mh(sd->method());
      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != NULL) {
      mdo->reset_start_counters();
    }
    // Check after processing so the top scope itself is included.
    if (sd->is_top()) break;
  }
}
// Central entry point for counter-overflow notifications. Decides whether
// to trigger a compilation, and returns an OSR nmethod the caller should
// jump into, or NULL.
nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
  // Don't compile while a JVMTI agent keeps this thread interpreter-only.
  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      thread->is_interp_only_mode()) {
    return NULL;
  }
  // CompileTheWorld / replay drive compilation themselves.
  if (CompileTheWorld || ReplayCompiles) {
    return NULL;
  }
  // Preserve counter state before the counters wrap.
  handle_counter_overflow(method());
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee());
  }
  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
  }
  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, thread);
  } else {
    method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
    // Hand back a better OSR method if one exists at a higher level.
    nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
    if (osr_nm != NULL && osr_nm->comp_level() > comp_level) {
      return osr_nm;
    }
  }
  return NULL;
}
// Enqueue a compilation of mh at the given level. bci != InvocationEntryBci
// requests an OSR compile at that bci. Falls back from C2 to C1 when the
// method cannot be compiled at the full-optimization level, and avoids
// duplicate queue entries.
void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  assert(level <= TieredStopAtLevel, "Invalid compilation level");
  if (level == CompLevel_none) {
    return;
  }
  if (!can_be_compiled(mh, level)) {
    // The requested level is refused; try the C1 equivalent instead.
    if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
      compile(mh, bci, CompLevel_simple, thread);
    }
    return;
  }
  // OSR request for a method that is not OSR-compilable at this level.
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh, mh, bci, level);
    }
    submit_compile(mh, bci, level, thread);
  }
}
// Hand the request to the compile broker, reporting the relevant hotness
// counter (invocation count for standard compiles, backedge count for OSR).
void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  bool is_standard_compile = (bci == InvocationEntryBci);
  int hot_count = is_standard_compile ? mh->invocation_count() : mh->backedge_count();
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}
// Should an OSR (loop) compilation be triggered given invocation count i,
// backedge count b, and the current compilation level?
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  if (cur_level == CompLevel_none || cur_level == CompLevel_limited_profile) {
    return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
  }
  if (cur_level == CompLevel_full_profile) {
    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
  }
  return true;
}
// Should a standard compilation be triggered given invocation count i,
// backedge count b, and the current compilation level?
bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  if (cur_level == CompLevel_none || cur_level == CompLevel_limited_profile) {
    return call_predicate_helper<CompLevel_none>(i, b, 1.0);
  }
  if (cur_level == CompLevel_full_profile) {
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
  }
  return true;
}
// A method's profile is considered mature once its MDO counters reach
// ProfileMaturityPercentage of the full-profile thresholds. Trivial
// methods don't need a profile and are always mature.
bool SimpleThresholdPolicy::is_mature(Method* method) {
  if (is_trivial(method)) {
    return true;
  }
  MethodData* mdo = method->method_data();
  if (mdo == NULL) {
    return false;
  }
  int invocations = mdo->invocation_count();
  int backedges   = mdo->backedge_count();
  double scale    = ProfileMaturityPercentage / 100.0;
  return call_predicate_helper<CompLevel_full_profile>(invocations, backedges, scale) ||
         loop_predicate_helper<CompLevel_full_profile>(invocations, backedges, scale);
}
// Core level-transition function: given the current level and a predicate
// (call or loop flavor), compute the next compilation level for the
// method. The result is capped at TieredStopAtLevel.
CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
  if (is_trivial(method)) {
    // Trivial methods gain nothing from profiling; C1 is enough.
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If the counters already satisfy the full-profile -> C2 transition,
      // skip the profiling tier entirely.
      if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        next_level = CompLevel_full_profile;
      }
      break;
    case CompLevel_limited_profile:
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            // Judge by the counter deltas accumulated since the last
            // profile (re)start.
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            // Nothing left to profile; go straight to C2.
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
// Determine the next level on a method-invocation trigger, factoring in
// the highest level any OSR version of the method has reached.
CompLevel SimpleThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&SimpleThresholdPolicy::loop_predicate, method, cur_level));
  CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
  // If OSR already reached C2 while we are still profiling, promote the
  // whole method once it has been invoked at least once.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    // Otherwise never pick a level below what OSR has already proven.
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}
// Determine the next level for an OSR (back-branch triggered) compilation.
CompLevel SimpleThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
  if (cur_level != CompLevel_none) {
    return next_level;
  }
  // In the interpreter: prefer an already-existing OSR level, as long as
  // it does not exceed what the predicate asks for.
  CompLevel existing_osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
  return (existing_osr_level > CompLevel_none) ? existing_osr_level : next_level;
}
// Handle an invocation-counter overflow: possibly schedule a standard
// (non-OSR) compilation at a higher level.
void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                    CompLevel level, nmethod* nm, JavaThread* thread) {
  if (!is_compilation_enabled() || CompileBroker::compilation_is_in_queue(mh)) {
    return;
  }
  CompLevel next_level = call_event(mh(), level);
  if (next_level != level) {
    compile(mh, InvocationEntryBci, next_level, thread);
  }
}
// Handle a back-branch counter overflow: possibly schedule an OSR compile
// and/or a standard compile of the whole method.
void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                     int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
    CompLevel cur_level = comp_level(mh());
    CompLevel next_level = call_event(mh(), cur_level);
    CompLevel next_osr_level = loop_event(mh(), level);
    // If the OSR logic wants C2, recompile the whole method at its current
    // level instead of letting the standard-compile level fall below it.
    next_level = MAX2(next_level,
                      next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
    bool is_compiling = false;
    if (next_level != cur_level) {
      compile(mh, InvocationEntryBci, next_level, thread);
      is_compiling = true;
    }
    // Issue an OSR compile only if no whole-method compile was started.
    if (!is_compiling && next_osr_level != level) {
      compile(mh, bci, next_osr_level, thread);
    }
  }
}
C:\hotspot-69087d08d473\src\share\vm/runtime/simpleThresholdPolicy.hpp
#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
#include "code/nmethod.hpp"
#include "oops/methodData.hpp"
#include "runtime/compilationPolicy.hpp"
#include "utilities/globalDefinitions.hpp"
class CompileTask;
class CompileQueue;
// Tiered compilation policy that promotes methods between the interpreter,
// C1 (profiled) and C2 based on fixed invocation/backedge thresholds.
class SimpleThresholdPolicy : public CompilationPolicy {
  int _c1_count, _c2_count;  // compiler thread counts per tier
  // Counter-overflow bookkeeping (see the .cpp for details).
  inline void set_carry_if_necessary(InvocationCounter *counter);
  inline void handle_counter_overflow(Method* method);
  // Predicate: should a level transition happen given invocation count i,
  // backedge count b, and the current level?
  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
  bool call_predicate(int i, int b, CompLevel cur_level);
  bool loop_predicate(int i, int b, CompLevel cur_level);
  // Common level-transition logic shared by call and loop events.
  CompLevel common(Predicate p, Method* method, CompLevel cur_level);
  CompLevel call_event(Method* method, CompLevel cur_level);
  CompLevel loop_event(Method* method, CompLevel cur_level);
  void print_counters(const char* prefix, methodHandle mh);
protected:
  int c1_count() const     { return _c1_count; }
  int c2_count() const     { return _c2_count; }
  void set_c1_count(int x) { _c1_count = x; }
  void set_c2_count(int x) { _c2_count = x; }
  // Event kinds for -XX:+PrintTieredEvents tracing.
  enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
  void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
  // Subclass hook to append policy-specific data to a printed event.
  virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
  // Enqueue a compilation (bci != InvocationEntryBci means OSR).
  void compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread);
  inline bool is_trivial(Method* method);
  // Threshold tests parameterized by level; 'scale' stretches the
  // flag-defined thresholds (see simpleThresholdPolicy.inline.hpp).
  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
  // Level of the currently installed, in-use nmethod, or CompLevel_none.
  static CompLevel comp_level(Method* method) {
    nmethod *nm = method->code();
    if (nm != NULL && nm->is_in_use()) {
      return (CompLevel)nm->comp_level();
    }
    return CompLevel_none;
  }
  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
                                       CompLevel level, nmethod* nm, JavaThread* thread);
  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
                                        int bci, CompLevel level, nmethod* nm, JavaThread* thread);
public:
  SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
  virtual int compiler_count(CompLevel comp_level) {
    if (is_c1_compile(comp_level)) return c1_count();
    if (is_c2_compile(comp_level)) return c2_count();
    return 0;
  }
  virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); }
  virtual void do_safepoint_work() { }
  virtual void delay_compilation(Method* method) { }
  virtual void disable_compilation(Method* method) { }
  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
  virtual nmethod* event(methodHandle method, methodHandle inlinee,
                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread);
  virtual CompileTask* select_task(CompileQueue* compile_queue);
  virtual bool is_mature(Method* method);
  virtual void initialize();
  // Don't inline loopy callees while profiling (C1 profiled tiers) —
  // NOTE(review): presumably so their loops can still trigger OSR; confirm.
  virtual bool should_not_inline(ciEnv* env, ciMethod* callee) {
    return (env->comp_level() == CompLevel_limited_profile ||
            env->comp_level() == CompLevel_full_profile) &&
           callee->has_loops();
  }
};
#endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/simpleThresholdPolicy.inline.hpp
#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
// Invocation-threshold test for starting a compile from 'level'; 'scale'
// stretches or shrinks the flag-defined thresholds.
template<CompLevel level>
bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
  if (level == CompLevel_none || level == CompLevel_limited_profile) {
    return (i > Tier3InvocationThreshold * scale) ||
           (i > Tier3MinInvocationThreshold * scale && i + b > Tier3CompileThreshold * scale);
  }
  if (level == CompLevel_full_profile) {
    return (i > Tier4InvocationThreshold * scale) ||
           (i > Tier4MinInvocationThreshold * scale && i + b > Tier4CompileThreshold * scale);
  }
  return true;
}
// Back-edge-threshold test for starting an OSR compile from 'level';
// 'scale' stretches or shrinks the flag-defined thresholds.
template<CompLevel level>
bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
  if (level == CompLevel_none || level == CompLevel_limited_profile) {
    return b > Tier3BackEdgeThreshold * scale;
  }
  if (level == CompLevel_full_profile) {
    return b > Tier4BackEdgeThreshold * scale;
  }
  return true;
}
// A method is "trivial" if it is an accessor/constant getter, or a tiny
// loop-free method whose MDO shows nothing worth profiling.
bool SimpleThresholdPolicy::is_trivial(Method* method) {
  if (method->is_accessor() || method->is_constant_getter()) {
    return true;
  }
  if (method->has_loops() || method->code_size() >= 15) {
    return false;
  }
  MethodData* mdo = method->method_data();
  if (mdo == NULL || mdo->would_profile()) {
    return false;
  }
  return method->code_size() < 5 || mdo->num_blocks() < 4;
}
#endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/stackValue.cpp
#include "precompiled.hpp"
#include "code/debugInfo.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/stackValue.hpp"
// Reconstruct one Java stack slot from compiled-code debug info. 'sv'
// describes where/what the value is (machine location, constant, or
// scalar-replaced object); 'fr' and 'reg_map' locate physical storage.
StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv) {
  if (sv->is_location()) {
    // Value lives in a register or on the stack; compute its address.
    Location loc = ((LocationValue *)sv)->location();
#ifdef SPARC
    assert( !(loc.is_register() && loc.type() == Location::float_in_dbl), "Sparc does not handle callee-save floats yet" );
#endif // SPARC
    address value_addr = loc.is_register()
      ? reg_map->location(VMRegImpl::as_VMReg(loc.register_number()))
      : ((address)fr->unextended_sp()) + loc.stack_offset();
    switch( loc.type() ) {
    case Location::float_in_dbl: { // Holds a float in a double register?
      assert( loc.is_register(), "floats always saved to stack in 1 word" );
      union { intptr_t p; jfloat jf; } value;
      // 0xDEADDEAF... poison makes the unused upper bits recognizable.
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.jf = (jfloat) *(jdouble*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
    case Location::int_in_long: { // Holds an int in a long register?
      assert( loc.is_register(), "ints always saved to stack in 1 word" );
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.ji = (jint) *(jlong*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
#ifdef _LP64
    case Location::dbl:
      // Double fits in one machine word on LP64.
      return new StackValue(*(intptr_t*)value_addr);
    case Location::lng:
      // Long fits in one machine word on LP64.
      return new StackValue(*(intptr_t*)value_addr);
    case Location::narrowoop: {
      union { intptr_t p; narrowOop noop;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      if (loc.is_register()) {
        // Register slots hold a full saved word; take only the narrow
        // oop bits out of it.
        value.noop = (narrowOop) *(julong*) value_addr;
      } else {
        value.noop = *(narrowOop*) value_addr;
      }
      // Decode the compressed oop and wrap it in a handle.
      Handle h(oopDesc::decode_heap_oop(value.noop));
      return new StackValue(h);
    }
#endif
    case Location::oop: {
      oop val = *(oop *)value_addr;
#ifdef _LP64
      // A value equal to the narrow-oop encoding base is not a real oop;
      // normalize it to NULL.
      if (Universe::is_narrow_oop_base(val)) {
        val = (oop)NULL;
      }
#endif
      Handle h(val); // Wrap a handle around the oop
      return new StackValue(h);
    }
    case Location::addr: {
      ShouldNotReachHere(); // both C1 and C2 now inline jsrs
    }
    case Location::normal: {
      // Plain 32-bit slot value; copy it through.
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
      value.ji = *(jint*)value_addr;
      return new StackValue(value.p);
    }
    case Location::invalid:
      return new StackValue();
    default:
      ShouldNotReachHere();
    }
  } else if (sv->is_constant_int()) {
    // Compile-time constant int.
    union { intptr_t p; jint ji;} value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.ji = (jint)((ConstantIntValue*)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_oop()) {
    // Compile-time constant oop (already a handle).
    return new StackValue(sv->as_ConstantOopReadValue()->value());
#ifdef _LP64
  } else if (sv->is_constant_double()) {
    // Compile-time constant double, one word on LP64.
    union { intptr_t p; double d; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.d = ((ConstantDoubleValue *)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_long()) {
    // Compile-time constant long, one word on LP64.
    union { intptr_t p; jlong jl; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF);
    value.jl = ((ConstantLongValue *)sv)->value();
    return new StackValue(value.p);
#endif
  } else if (sv->is_object()) { // Scalar replaced object in compiled frame
    Handle ov = ((ObjectValue *)sv)->value();
    // A null handle marks a scalar-replaced (eliminated) object.
    return new StackValue(ov, (ov.is_null()) ? 1 : 0);
  }
  ShouldNotReachHere();
  return new StackValue((intptr_t) 0); // dummy
}
// Map a debug-info stack Location to the BasicLock slot inside the frame.
BasicLock* StackValue::resolve_monitor_lock(const frame* fr, Location location) {
  assert(location.is_stack(), "for now we only look at the stack");
  const int word_offset = location.stack_offset() / wordSize;
  return (BasicLock*)(fr->unextended_sp() + word_offset);
}
#ifndef PRODUCT
// Debug print: show the slot under several interpretations depending on
// its type tag.
void StackValue::print_on(outputStream* st) const {
  switch(_type) {
  case T_INT:
    // Same raw bits shown as int, float and hex.
    st->print("%d (int) %f (float) %x (hex)", *(int *)&_i, *(float *)&_i, *(int *)&_i);
    break;
  case T_OBJECT:
    _o()->print_value_on(st);
    st->print(" <" INTPTR_FORMAT ">", p2i((address)_o()));
    break;
  case T_CONFLICT:
    st->print("conflict");
    break;
  default:
    ShouldNotReachHere();
  }
}
#endif
C:\hotspot-69087d08d473\src\share\vm/runtime/stackValue.hpp
#ifndef SHARE_VM_RUNTIME_STACKVALUE_HPP
#define SHARE_VM_RUNTIME_STACKVALUE_HPP
#include "code/location.hpp"
#include "runtime/handles.hpp"
#include "utilities/top.hpp"
// One Java stack slot reconstructed from debug info: either a raw machine
// word (T_INT), an oop wrapped in a Handle (T_OBJECT), or an unknown
// slot (T_CONFLICT).
class StackValue : public ResourceObj {
 private:
  BasicType _type;
  intptr_t  _i; // Blank java stack slot value
  Handle    _o; // Java stack slot value interpreted as a Handle
 public:
  // Raw word slot.
  StackValue(intptr_t value) {
    _type  = T_INT;
    _i     = value;
  }
  // Object slot. For T_OBJECT, _i doubles as the scalar-replaced flag:
  // non-zero marks an eliminated object, which must have a null handle.
  StackValue(Handle value, intptr_t scalar_replaced = 0) {
    _type = T_OBJECT;
    _i = scalar_replaced;
    _o = value;
    assert(_i == 0 || _o.is_null(), "not null object should not be marked as scalar replaced");
  }
  // Unknown/conflicting slot.
  StackValue() {
    _type   = T_CONFLICT;
    _i      = 0;
  }
  // Stores a raw word under a T_OBJECT tag; asserts the intended use.
  StackValue(intptr_t o, BasicType t) {
    assert(t == T_OBJECT, "should not be used");
    _type = t;
    _i    = o;
  }
  Handle get_obj() const {
    assert(type() == T_OBJECT, "type check");
    return _o;
  }
  bool obj_is_scalar_replaced() const {
    assert(type() == T_OBJECT, "type check");
    return _i != 0;
  }
  void set_obj(Handle value) {
    assert(type() == T_OBJECT, "type check");
    _o = value;
  }
  intptr_t get_int() const {
    assert(type() == T_INT, "type check");
    return _i;
  }
  // Variant used when a raw word was stored under a T_OBJECT tag.
  intptr_t get_int(BasicType t) const {
    assert(t == T_OBJECT && type() == T_OBJECT, "type check");
    return _i;
  }
  void set_int(intptr_t value) {
    assert(type() == T_INT, "type check");
    _i = value;
  }
  BasicType type() const { return _type; }
  // Structural equality: same type and same payload.
  bool equal(StackValue *value) {
    if (_type != value->_type) return false;
    if (_type == T_OBJECT)
      return (_o == value->_o);
    else {
      assert(_type == T_INT, "sanity check");
      // Compare only the low 32 bits of the raw word.
      return (*(int *)&_i == *(int *)&value->_i);
    }
  }
  static StackValue* create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
  static BasicLock*  resolve_monitor_lock(const frame* fr, Location location);
#ifndef PRODUCT
 public:
  void print_on(outputStream* st) const;
#endif
};
#endif // SHARE_VM_RUNTIME_STACKVALUE_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/stackValueCollection.cpp
#include "precompiled.hpp"
#include "runtime/stackValueCollection.hpp"
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "jniTypes_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "jniTypes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "jniTypes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "jniTypes_ppc.hpp"
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Reinterpret the raw slot word as a jint (its low 32 bits).
jint StackValueCollection::int_at(int slot) const {
  intptr_t raw = at(slot)->get_int();
  return *((jint*)(&raw));
}
// Read a jlong occupying two slots: the value lives in the second slot on
// LP64, and is split across the two 32-bit slots otherwise.
jlong StackValueCollection::long_at(int slot) const {
#ifdef _LP64
  return at(slot+1)->get_int();
#else
  union {
    jlong jl;
    jint  array[2];
  } value;
  // NOTE(review): word order assumed by this union layout is
  // platform-dependent — confirm against the matching set_long_at.
  value.array[0] = at(slot+1)->get_int();
  value.array[1] = at(slot )->get_int();
  return value.jl;
#endif
}
// Return the handle stored in an object slot.
Handle StackValueCollection::obj_at(int slot) const {
  StackValue* sv = at(slot);
  return sv->get_obj();
}
// Reinterpret the low 32 bits of the slot word as a jfloat.
jfloat StackValueCollection::float_at(int slot) const {
  intptr_t raw = at(slot)->get_int();
  return *((jfloat*)(&raw));
}
// Read a jdouble occupying two slots (value in the second slot on LP64,
// split across the two 32-bit halves otherwise).
jdouble StackValueCollection::double_at(int slot) const {
#ifdef _LP64
  intptr_t res = at(slot+1)->get_int();
  return *((jdouble*) (&res));
#else
  union {
    jdouble jd;
    jint    array[2];
  } value;
  // NOTE(review): word order assumed by this union layout is
  // platform-dependent — confirm against the matching set_double_at.
  value.array[0] = at(slot+1)->get_int();
  value.array[1] = at(slot )->get_int();
  return value.jd;
#endif
}
void StackValueCollection::set_int_at(int slot, jint value) {
intptr_t val;
at(slot)->set_int(val);
}
// Store a jlong across two slots, mirroring long_at's layout: the whole
// value goes into the second slot on LP64, and is split otherwise.
void StackValueCollection::set_long_at(int slot, jlong value) {
#ifdef _LP64
  at(slot+1)->set_int(value);
#else
  union {
    jlong jl;
    jint  array[2];
  } x;
  x.jl = value;
  at(slot+1)->set_int(x.array[0]);
  at(slot+0)->set_int(x.array[1]);
#endif
}
void StackValueCollection::set_obj_at(int slot, Handle value) {
at(slot)->set_obj(value);
}
// Store a jfloat in a slot; on LP64 the float occupies one half of the
// word with the other half zeroed.
void StackValueCollection::set_float_at(int slot, jfloat value) {
#ifdef _LP64
  union {
    intptr_t jd;
    jint     array[2];
  } val;
  // NOTE(review): placing the float in array[0] assumes a particular
  // word order within the union — platform-dependent; confirm.
  val.array[0] = *(jint*)(&value);
  val.array[1] = 0;
  at(slot)->set_int(val.jd);
#else
  at(slot)->set_int(*(jint*)(&value));
#endif
}
// Store a jdouble across two slots, mirroring double_at's layout.
void StackValueCollection::set_double_at(int slot, jdouble value) {
#ifdef _LP64
  at(slot+1)->set_int(*(intptr_t*)(&value));
#else
  union {
    jdouble jd;
    jint    array[2];
  } x;
  x.jd = value;
  at(slot+1)->set_int(x.array[0]);
  at(slot+0)->set_int(x.array[1]);
#endif
}
#ifndef PRODUCT
// Debug dump of every slot; adjacent T_INT pairs are additionally shown
// interpreted as a two-slot long/double value.
void StackValueCollection::print() {
  for(int index = 0; index < size(); index++) {
    tty->print("\t %2d ", index);
    at(index)->print_on(tty);
    if( at(index )->type() == T_INT &&
        index+1 < size() &&
        at(index+1)->type() == T_INT ) {
      tty->print(" " INT64_FORMAT " (long)", long_at(index));
      tty->cr();
      tty->print("\t %.15e (double)", double_at(index));
      tty->print(" " PTR64_FORMAT " (longhex)", long_at(index));
    }
    tty->cr();
  }
}
#endif
C:\hotspot-69087d08d473\src\share\vm/runtime/stackValueCollection.hpp
#ifndef SHARE_VM_RUNTIME_STACKVALUECOLLECTION_HPP
#define SHARE_VM_RUNTIME_STACKVALUECOLLECTION_HPP
#include "memory/allocation.hpp"
#include "runtime/stackValue.hpp"
#include "utilities/growableArray.hpp"
// Resource-allocated growable list of StackValue*s representing the
// locals / expression stack of one frame. Long and double values occupy
// two consecutive slots (see the typed accessors in the .cpp).
class StackValueCollection : public ResourceObj {
 private:
  GrowableArray<StackValue*>* _values;
 public:
  StackValueCollection()           { _values = new GrowableArray<StackValue*>(); }
  StackValueCollection(int length) { _values = new GrowableArray<StackValue*>(length); }
  // Note: declared const but mutates the underlying growable array.
  void add(StackValue *val) const { _values->push(val); }
  int  size() const               { return _values->length(); }
  bool is_empty() const           { return (size() == 0); }
  StackValue* at(int i) const     { return _values->at(i); }
  // Typed slot accessors.
  jint    int_at(int slot) const;
  jlong   long_at(int slot) const;
  Handle  obj_at(int slot) const;
  jfloat  float_at(int slot) const;
  jdouble double_at(int slot) const;
  void set_int_at(int slot, jint value);
  void set_long_at(int slot, jlong value);
  void set_obj_at(int slot, Handle value);
  void set_float_at(int slot, jfloat value);
  void set_double_at(int slot, jdouble value);
  void print();
};
#endif // SHARE_VM_RUNTIME_STACKVALUECOLLECTION_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/statSampler.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/statSampler.hpp"
#ifdef TARGET_ARCH_x86
# include "vm_version_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "vm_version_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vm_version_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vm_version_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vm_version_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vm_version_ppc.hpp"
#endif
// Periodic task that samples all registered perf data at a fixed interval.
class StatSamplerTask : public PeriodicTask {
  public:
    StatSamplerTask(int interval_time) : PeriodicTask(interval_time) {}
    void task() { StatSampler::collect_sample(); }
};
StatSamplerTask* StatSampler::_task    = NULL; // active periodic task, NULL when disengaged
PerfDataList*    StatSampler::_sampled = NULL; // cached list of sampled perf data items
// Create the constant perf counters and cache the list of sampled items.
void StatSampler::initialize() {
  if (!UsePerfData) {
    return;
  }
  create_misc_perfdata();
  _sampled = PerfDataManager::sampled();
}
// Start periodic sampling; no-op when perf data is disabled or the
// sampler is already running.
void StatSampler::engage() {
  if (!UsePerfData) {
    return;
  }
  if (is_active()) {
    return;
  }
  initialize();
  _task = new StatSamplerTask(PerfDataSamplingInterval);
  _task->enroll();
}
// Stop periodic sampling. Takes one final sample after the task is torn
// down so the data reflects the state at disengage time.
void StatSampler::disengage() {
  if (!UsePerfData) return;
  if (!is_active())
    return;
  // Remove the task from the periodic task list before deleting it.
  _task->disenroll();
  delete _task;
  _task = NULL;
  // Final sample of the cached list.
  sample_data(_sampled);
}
// Release the cached sampled-data list.
void StatSampler::destroy() {
  if (!UsePerfData) {
    return;
  }
  if (_sampled == NULL) {
    return;
  }
  delete _sampled;
  _sampled = NULL;
}
// Take a sample of every item on the given list.
void StatSampler::sample_data(PerfDataList* list) {
  assert(list != NULL, "null list unexpected");
  for (int i = 0; i < list->length(); i++) {
    PerfData* datum = list->at(i);
    datum->sample();
  }
}
// Periodic-task callback: sample all registered items.
void StatSampler::collect_sample() {
  assert(_sampled != NULL, "list not initialized");
  sample_data(_sampled);
}
// Fetch a system property via an upcall to java.lang.System.getProperty().
// Returns the value as a resource-allocated UTF-8 string, or NULL when the
// property is unset. May propagate a pending exception (CHECK_NULL).
const char* StatSampler::get_system_property(const char* name, TRAPS) {
  // Wrap the C string in a java.lang.String.
  Handle key_str = java_lang_String::create_from_str(name, CHECK_NULL);
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         KlassHandle(THREAD, SystemDictionary::System_klass()),
                         vmSymbols::getProperty_name(),
                         vmSymbols::string_string_signature(),
                         key_str,
                         CHECK_NULL);
  oop value_oop = (oop)result.get_jobject();
  if (value_oop == NULL) {
    return NULL;
  }
  // Convert the returned java.lang.String to UTF-8.
  char* value = java_lang_String::as_utf8_string(value_oop);
  return value;
}
// Tables of system properties exported as perf string constants at
// startup, grouped by the counter namespace they are created under.
static const char* property_counters_ss[] = {
  "java.vm.specification.version",
  "java.vm.specification.name",
  "java.vm.specification.vendor",
  "java.vm.version",
  "java.vm.name",
  "java.vm.vendor",
  "java.vm.info",
  "java.library.path",
  "java.class.path",
  "java.endorsed.dirs",
  "java.ext.dirs",
  "java.version",
  "java.home",
  NULL  // list terminator
};
static const char* property_counters_us[] = {
  NULL  // list terminator (currently no entries)
};
static const char* property_counters_uu[] = {
  "sun.boot.class.path",
  "sun.boot.library.path",
  NULL  // list terminator
};
// Pairs a property list with the namespace its counters are created in.
typedef struct {
  const char** property_list;
  CounterNS name_space;
} PropertyCounters;
static PropertyCounters property_counters[] = {
  { property_counters_ss, JAVA_PROPERTY },
  { property_counters_us, COM_PROPERTY },
  { property_counters_uu, SUN_PROPERTY },
  { NULL, SUN_PROPERTY }  // table terminator
};
// Create a string-constant perf counter for every property listed in the
// property_counters tables, each under its group's namespace.
void StatSampler::create_system_property_instrumentation(TRAPS) {
  ResourceMark rm;
  for (int i = 0; property_counters[i].property_list != NULL; i++) {
    for (int j = 0; property_counters[i].property_list[j] != NULL; j++) {
      const char* property_name = property_counters[i].property_list[j];
      assert(property_name != NULL, "property name should not be NULL");
      const char* value = get_system_property(property_name, CHECK);
      assert(value != NULL, "property name should be valid");
      // Guard against an unset property in product builds.
      if (value != NULL) {
        PerfDataManager::create_string_constant(property_counters[i].name_space,
                                                property_name, value, CHECK);
      }
    }
  }
}
// Create the one-time constant counters (timer frequency, system
// properties, VM flags/args, command line, version), then the sampled
// counters.
void StatSampler::create_misc_perfdata() {
  ResourceMark rm;
  EXCEPTION_MARK;
  // Frequency of the high-resolution timer.
  PerfDataManager::create_constant(SUN_OS, "hrt.frequency",
                                   PerfData::U_Hertz, os::elapsed_frequency(),
                                   CHECK);
  create_system_property_instrumentation(CHECK);
  PerfDataManager::create_string_constant(JAVA_RT, "vmFlags",
                                          Arguments::jvm_flags(), CHECK);
  PerfDataManager::create_string_constant(JAVA_RT, "vmArgs",
                                          Arguments::jvm_args(), CHECK);
  PerfDataManager::create_string_constant(SUN_RT, "javaCommand",
                                          Arguments::java_command(), CHECK);
  PerfDataManager::create_string_constant(SUN_RT, "internalVersion",
                                          VM_Version::internal_vm_info_string(),
                                          CHECK);
  create_sampled_perfdata();
}
// Sample helper that reports the current high-resolution timer tick count.
class HighResTimeSampler : public PerfSampleHelper {
  public:
    jlong take_sample() { return os::elapsed_counter(); }
};
void StatSampler::create_sampled_perfdata() {
EXCEPTION_MARK;
PerfSampleHelper* psh = new HighResTimeSampler();
PerfDataManager::create_counter(SUN_OS, "hrt.ticks",
PerfData::U_Ticks, psh, CHECK);
}
// VM-exit hook: tear down the sampler's cached state.
void statSampler_exit() {
  if (UsePerfData) {
    StatSampler::destroy();
  }
}
C:\hotspot-69087d08d473\src\share\vm/runtime/statSampler.hpp
#ifndef SHARE_VM_RUNTIME_STATSAMPLER_HPP
#define SHARE_VM_RUNTIME_STATSAMPLER_HPP
#include "runtime/perfData.hpp"
#include "runtime/task.hpp"
class StatSamplerTask;
// Periodically samples all "sampled" perf data items via a PeriodicTask;
// engaged after startup and disengaged/destroyed at VM shutdown.
class StatSampler : AllStatic {
  friend class StatSamplerTask;
  private:
    static StatSamplerTask* _task;    // active periodic task, NULL when inactive
    static PerfDataList* _sampled;    // cached list of sampled items
    static void collect_sample();
    static void create_misc_perfdata();
    static void create_sampled_perfdata();
    static void sample_data(PerfDataList* list);
    static const char* get_system_property(const char* name, TRAPS);
    static void create_system_property_instrumentation(TRAPS);
  public:
    static void engage();
    static void disengage();
    static bool is_active() { return _task != NULL; }
    static void initialize();
    static void destroy();
};
// Shutdown hook; see statSampler.cpp.
void statSampler_exit();
#endif // SHARE_VM_RUNTIME_STATSAMPLER_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/stubCodeGenerator.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/stubCodeGenerator.hpp"
StubCodeDesc* volatile StubCodeDesc::_list  = NULL; // head of the global descriptor list (newest first)
int                    StubCodeDesc::_count = 0;    // number of descriptors created so far
// Linear search of the global descriptor list for the stub containing pc;
// returns NULL if pc is not inside any registered stub.
StubCodeDesc* StubCodeDesc::desc_for(address pc) {
  StubCodeDesc* p = (StubCodeDesc*)OrderAccess::load_ptr_acquire(&_list);
  while (p != NULL) {
    if (p->contains(pc)) {
      return p;
    }
    p = p->_next;
  }
  return NULL;
}
// Linear search of the global descriptor list for the stub with the given
// serial index; returns NULL if no such stub exists.
StubCodeDesc* StubCodeDesc::desc_for_index(int index) {
  StubCodeDesc* p = (StubCodeDesc*)OrderAccess::load_ptr_acquire(&_list);
  while (p != NULL) {
    if (p->index() == index) {
      return p;
    }
    p = p->_next;
  }
  return NULL;
}
// Name of the stub containing pc, or NULL if pc is not in any stub.
const char* StubCodeDesc::name_for(address pc) {
  StubCodeDesc* desc = desc_for(pc);
  if (desc == NULL) {
    return NULL;
  }
  return desc->name();
}
// Print "group::name [begin, end[ (size bytes)".
void StubCodeDesc::print_on(outputStream* st) const {
  st->print("%s::%s", group(), name());
  st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT "[ (%d bytes)", p2i(begin()), p2i(end()), size_in_bytes());
}
// Create a generator emitting into 'code'. Generated stubs are printed
// at destruction time when print_code is set (or -XX:+PrintStubCode).
StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) {
  _masm       = new MacroAssembler(code);
  _first_stub = NULL;
  _last_stub  = NULL;
  _print_code = print_code;
}
// qsort comparator ordering stub descriptors by creation index.
// Wrapped in extern "C" because qsort expects a C function pointer.
extern "C" {
  static int compare_cdesc(const void* void_a, const void* void_b) {
    int ai = (*((StubCodeDesc**) void_a))->index();
    int bi = (*((StubCodeDesc**) void_b))->index();
    return ai - bi;
  }
}
// On destruction, optionally disassemble and print every stub generated
// by this generator, in creation (index) order.
StubCodeGenerator::~StubCodeGenerator() {
  if (PrintStubCode || _print_code) {
    CodeBuffer* cbuf = _masm->code();
    CodeBlob* blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
    if (blob != NULL) {
      // Attach the code-comment strings so the disassembly is annotated.
      blob->set_strings(cbuf->strings());
    }
    bool saw_first = false;
    // NOTE(review): fixed-size buffer caps printable stubs at 1000 with no
    // overflow check on toprint_len — confirm this bound is never exceeded.
    StubCodeDesc* toprint[1000];
    int toprint_len = 0;
    // The global list is newest-first; walk from our last stub back to the
    // first one this generator produced.
    for (StubCodeDesc* cdesc = _last_stub; cdesc != NULL; cdesc = cdesc->_next) {
      toprint[toprint_len++] = cdesc;
      if (cdesc == _first_stub) { saw_first = true; break; }
    }
    assert(saw_first, "must get both first & last");
    // Sort into creation order before printing.
    qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
    for (int i = 0; i < toprint_len; i++) {
      StubCodeDesc* cdesc = toprint[i];
      cdesc->print();
      tty->cr();
      Disassembler::decode(cdesc->begin(), cdesc->end());
      tty->cr();
    }
  }
}
// Hook called by StubCodeMark before stub generation; default is a no-op.
void StubCodeGenerator::stub_prolog(StubCodeDesc* cdesc) {
}
// Record the completed stub so the destructor can print the full range.
void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) {
  if (_first_stub == NULL) {
    _first_stub = cdesc;
  }
  _last_stub = cdesc;
}
// Opens a stub: creates its descriptor (which publishes itself on the
// global list) and notifies the generator via stub_prolog.
StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name) {
  _cgen  = cgen;
  _cdesc = new StubCodeDesc(group, name, _cgen->assembler()->pc());
  _cgen->stub_prolog(_cdesc);
  // stub_prolog may have emitted code; reset begin to the current pc.
  _cdesc->set_begin(_cgen->assembler()->pc());
}
// Closes a stub: records its end pc, notifies the generator, and reports
// the new code range to Forte and (when requested) JVMTI.
StubCodeMark::~StubCodeMark() {
  _cgen->assembler()->flush();
  _cdesc->set_end(_cgen->assembler()->pc());
  // Our descriptor must still be the most recently published one.
  assert(StubCodeDesc::_list == _cdesc, "expected order on list");
  _cgen->stub_epilog(_cdesc);
  Forte::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
  if (JvmtiExport::should_post_dynamic_code_generated()) {
    JvmtiExport::post_dynamic_code_generated(_cdesc->name(), _cdesc->begin(), _cdesc->end());
  }
}
C:\hotspot-69087d08d473\src\share\vm/runtime/stubCodeGenerator.hpp
#ifndef SHARE_VM_RUNTIME_STUBCODEGENERATOR_HPP
#define SHARE_VM_RUNTIME_STUBCODEGENERATOR_HPP
#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
// Records the name and address range of one generated stub. All
// descriptors are linked into a global list published with
// acquire/release ordering.
class StubCodeDesc: public CHeapObj<mtCode> {
 protected:
  static StubCodeDesc* volatile _list; // the list of all descriptors
  static int _count;                   // length of list
  StubCodeDesc* _next;                 // the next element in the linked list
  const char* _group;                  // the group to which the stub code belongs
  const char* _name;                   // the name assigned to the stub code
  int _index;                          // serial number assigned to the stub
  address _begin;                      // points to the first byte of the stub code (included)
  address _end;                        // points to the first byte after the stub code (excluded)
  void set_end(address end) {
    assert(_begin <= end, "begin & end not properly ordered");
    _end = end;
  }
  void set_begin(address begin) {
    assert(begin >= _begin, "begin may not decrease");
    assert(_end == NULL || begin <= _end, "begin & end not properly ordered");
    _begin = begin;
  }
  friend class StubCodeMark;
  friend class StubCodeGenerator;
 public:
  static StubCodeDesc* desc_for(address pc);  // returns the code descriptor for the code containing pc or NULL
  static StubCodeDesc* desc_for_index(int);   // returns the code descriptor for the index or NULL
  static const char* name_for(address pc);    // returns the name of the code containing pc or NULL
  // Construction publishes this descriptor at the head of the global list
  // with a release store, pairing with the acquire loads in desc_for*.
  StubCodeDesc(const char* group, const char* name, address begin) {
    assert(name != NULL, "no name specified");
    _next   = (StubCodeDesc*)OrderAccess::load_ptr_acquire(&_list);
    _group  = group;
    _name   = name;
    _index  = ++_count; // (never zero)
    _begin  = begin;
    _end    = NULL;
    OrderAccess::release_store_ptr(&_list, this);
  };
  const char* group() const { return _group; }
  const char* name() const  { return _name; }
  int index() const         { return _index; }
  address begin() const     { return _begin; }
  address end() const       { return _end; }
  int size_in_bytes() const { return _end - _begin; }
  bool contains(address pc) const { return _begin <= pc && pc < _end; }
  void print_on(outputStream* st) const;
  void print() const { print_on(tty); }
};
// Drives generation of a batch of stubs into a CodeBuffer.  Tracks the
// first and last descriptor generated and offers prolog/epilog hooks that
// StubCodeMark invokes around each individual stub.
class StubCodeGenerator: public StackObj {
protected:
MacroAssembler* _masm;
StubCodeDesc* _first_stub;
StubCodeDesc* _last_stub;
bool _print_code; // presumably enables printing of generated stubs -- set via ctor; confirm
public:
StubCodeGenerator(CodeBuffer* code, bool print_code = false);
~StubCodeGenerator();
MacroAssembler* assembler() const { return _masm; }
virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor
virtual void stub_epilog(StubCodeDesc* cdesc); // called by StubCodeMark destructor
};
// RAII helper bracketing the generation of a single stub: the constructor
// creates the descriptor and runs the generator's prolog; the destructor
// records the end pc, runs the epilog, and notifies Forte/JVMTI.
class StubCodeMark: public StackObj {
protected:
StubCodeGenerator* _cgen; // the generator this mark reports to
StubCodeDesc* _cdesc; // descriptor of the stub being generated
public:
StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name);
~StubCodeMark();
};
#endif // SHARE_VM_RUNTIME_STUBCODEGENERATOR_HPP
// ==== file: C:\hotspot-69087d08d473\src\share\vm/runtime/stubRoutines.cpp ====
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "utilities/copy.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// Definitions of all StubRoutines static members.  Entry points default to
// NULL (or to a C++ fallback, below) and are filled in by the platform
// stub generator during initialize1()/initialize2().
BufferBlob* StubRoutines::_code1 = NULL;
BufferBlob* StubRoutines::_code2 = NULL;
address StubRoutines::_call_stub_return_address = NULL;
address StubRoutines::_call_stub_entry = NULL;
address StubRoutines::_catch_exception_entry = NULL;
address StubRoutines::_forward_exception_entry = NULL;
address StubRoutines::_throw_AbstractMethodError_entry = NULL;
address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
address StubRoutines::_throw_StackOverflowError_entry = NULL;
address StubRoutines::_handler_for_unsafe_access_entry = NULL;
jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = NULL;
// Atomic operation stubs.
address StubRoutines::_atomic_xchg_entry = NULL;
address StubRoutines::_atomic_xchg_ptr_entry = NULL;
address StubRoutines::_atomic_store_entry = NULL;
address StubRoutines::_atomic_store_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
address StubRoutines::_atomic_add_entry = NULL;
address StubRoutines::_atomic_add_ptr_entry = NULL;
address StubRoutines::_fence_entry = NULL;
address StubRoutines::_d2i_wrapper = NULL;
address StubRoutines::_d2l_wrapper = NULL;
// FPU control words / SSE state used by the x87/SSE stubs.
jint StubRoutines::_fpu_cntrl_wrd_std = 0;
jint StubRoutines::_fpu_cntrl_wrd_24 = 0;
jint StubRoutines::_fpu_cntrl_wrd_64 = 0;
jint StubRoutines::_fpu_cntrl_wrd_trunc = 0;
jint StubRoutines::_mxcsr_std = 0;
jint StubRoutines::_fpu_subnormal_bias1[3] = { 0, 0, 0 };
jint StubRoutines::_fpu_subnormal_bias2[3] = { 0, 0, 0 };
// Arraycopy entry points start out pointing at the C++ fallbacks defined
// later in this file; platform code may replace them with assembly stubs.
address StubRoutines::_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy);
address StubRoutines::_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy);
address StubRoutines::_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy);
address StubRoutines::_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy);
address StubRoutines::_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy);
address StubRoutines::_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit);
address StubRoutines::_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy);
address StubRoutines::_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy);
address StubRoutines::_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy);
address StubRoutines::_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy);
address StubRoutines::_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy);
address StubRoutines::_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit);
address StubRoutines::_arrayof_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy);
address StubRoutines::_arrayof_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy);
address StubRoutines::_arrayof_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy);
address StubRoutines::_arrayof_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy);
address StubRoutines::_arrayof_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
address StubRoutines::_arrayof_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit);
address StubRoutines::_arrayof_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy);
address StubRoutines::_arrayof_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy);
address StubRoutines::_arrayof_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy);
address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy);
address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit);
address StubRoutines::_zero_aligned_words = CAST_FROM_FN_PTR(address, Copy::zero_to_words);
address StubRoutines::_checkcast_arraycopy = NULL;
address StubRoutines::_checkcast_arraycopy_uninit = NULL;
address StubRoutines::_unsafe_arraycopy = NULL;
address StubRoutines::_generic_arraycopy = NULL;
// Fill stubs: no explicit initializer; zero-initialized (static storage).
address StubRoutines::_jbyte_fill;
address StubRoutines::_jshort_fill;
address StubRoutines::_jint_fill;
address StubRoutines::_arrayof_jbyte_fill;
address StubRoutines::_arrayof_jshort_fill;
address StubRoutines::_arrayof_jint_fill;
// Crypto / checksum / big-integer intrinsic stubs.
address StubRoutines::_aescrypt_encryptBlock = NULL;
address StubRoutines::_aescrypt_decryptBlock = NULL;
address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
address StubRoutines::_ghash_processBlocks = NULL;
address StubRoutines::_sha1_implCompress = NULL;
address StubRoutines::_sha1_implCompressMB = NULL;
address StubRoutines::_sha256_implCompress = NULL;
address StubRoutines::_sha256_implCompressMB = NULL;
address StubRoutines::_sha512_implCompress = NULL;
address StubRoutines::_sha512_implCompressMB = NULL;
address StubRoutines::_updateBytesCRC32 = NULL;
address StubRoutines::_crc_table_adr = NULL;
address StubRoutines::_multiplyToLen = NULL;
address StubRoutines::_squareToLen = NULL;
address StubRoutines::_mulAdd = NULL;
address StubRoutines::_montgomeryMultiply = NULL;
address StubRoutines::_montgomerySquare = NULL;
// Optional math intrinsics, called through plain function pointers.
double (* StubRoutines::_intrinsic_log )(double) = NULL;
double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
double (* StubRoutines::_intrinsic_exp )(double) = NULL;
double (* StubRoutines::_intrinsic_pow )(double, double) = NULL;
double (* StubRoutines::_intrinsic_sin )(double) = NULL;
double (* StubRoutines::_intrinsic_cos )(double) = NULL;
double (* StubRoutines::_intrinsic_tan )(double) = NULL;
// SafeFetch stubs and the pcs used to recognize/continue their faults.
address StubRoutines::_safefetch32_entry = NULL;
address StubRoutines::_safefetch32_fault_pc = NULL;
address StubRoutines::_safefetch32_continuation_pc = NULL;
address StubRoutines::_safefetchN_entry = NULL;
address StubRoutines::_safefetchN_fault_pc = NULL;
address StubRoutines::_safefetchN_continuation_pc = NULL;
extern void StubGenerator_generate(CodeBuffer* code, bool all); // only interface to generators
// Generates the first batch of stubs into _code1; must happen before
// universe::genesis.  Idempotent: if _code1 is already set, does nothing.
void StubRoutines::initialize1() {
  if (_code1 != NULL) return;
  ResourceMark rm;
  TraceTime timer("StubRoutines generation 1", TraceStartupTime);
  _code1 = BufferBlob::create("StubRoutines (1)", code_size1);
  if (_code1 == NULL) {
    vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)");
  }
  CodeBuffer buffer(_code1);
  StubGenerator_generate(&buffer, false);
  // Require some headroom so future stub growth doesn't silently overflow.
  assert(code_size1 == 0 || buffer.insts_remaining() > 200, "increase code_size1");
}
#ifdef ASSERT
typedef void (*arraycopy_fn)(address src, address dst, int count);
// Sanity check for an arraycopy stub: a copy of zero elements, at several
// src/dest alignments, must leave both buffers completely untouched.
static void test_arraycopy_func(address func, int alignment) {
  int pattern_a = 0xcc;
  int pattern_b = 0x11;
  jlong lbuf_a[8];
  jlong lbuf_b[8];
  address bytes_a = (address) lbuf_a;
  address bytes_b = (address) lbuf_b;
  // Fill each buffer with its own recognizable byte pattern.
  for (unsigned int i = 0; i < sizeof(lbuf_a); i++) {
    bytes_a[i] = pattern_a;
    bytes_b[i] = pattern_b;
  }
  // Aligned pointers into the middle, so +/- alignment stays in bounds.
  address mid_a = (address) round_to((intptr_t)&lbuf_a[4], BytesPerLong);
  address mid_b = (address) round_to((intptr_t)&lbuf_b[4], BytesPerLong);
  ((arraycopy_fn)func)(mid_a, mid_b, 0);
  for (unsigned int i = 0; i < sizeof(lbuf_a); i++) {
    assert(bytes_a[i] == pattern_a && bytes_b[i] == pattern_b, "shouldn't have copied anything");
  }
  ((arraycopy_fn)func)(mid_a, mid_b + alignment, 0);
  for (unsigned int i = 0; i < sizeof(lbuf_a); i++) {
    assert(bytes_a[i] == pattern_a && bytes_b[i] == pattern_b, "shouldn't have copied anything");
  }
  ((arraycopy_fn)func)(mid_a + alignment, mid_b, 0);
  for (unsigned int i = 0; i < sizeof(lbuf_a); i++) {
    assert(bytes_a[i] == pattern_a && bytes_b[i] == pattern_b, "shouldn't have copied anything");
  }
}
#endif
// Generates the second batch of stubs into _code2; must happen after
// universe::genesis.  In debug builds, then exercises the arraycopy and
// fill stubs with zero-count / boundary self-tests.
void StubRoutines::initialize2() {
if (_code2 == NULL) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 2", TraceStartupTime);
_code2 = BufferBlob::create("StubRoutines (2)", code_size2);
if (_code2 == NULL) {
vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)");
}
CodeBuffer buffer(_code2);
StubGenerator_generate(&buffer, true);
// Require some headroom so future stub growth doesn't silently overflow.
assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size2");
}
#ifdef ASSERT
// Zero-count copies through every stub variant must not touch memory.
#define TEST_ARRAYCOPY(type) \
test_arraycopy_func( type##_arraycopy(), sizeof(type)); \
test_arraycopy_func( type##_disjoint_arraycopy(), sizeof(type)); \
test_arraycopy_func(arrayof_##type##_arraycopy(), sizeof(HeapWord)); \
test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
TEST_ARRAYCOPY(jbyte);
TEST_ARRAYCOPY(jshort);
TEST_ARRAYCOPY(jint);
TEST_ARRAYCOPY(jlong);
#undef TEST_ARRAYCOPY
// Fill self-test: fill 80 elements at various offsets/alignments and check
// that exactly the filled range changed and the borders were preserved.
#define TEST_FILL(type) \
if (_##type##_fill != NULL) { \
union { \
double d; \
type body[96]; \
} s; \
 \
int v = 32; \
for (int offset = -2; offset <= 2; offset++) { \
for (int i = 0; i < 96; i++) { \
s.body[i] = 1; \
} \
type* start = s.body + 8 + offset; \
for (int aligned = 0; aligned < 2; aligned++) { \
if (aligned) { \
if (((intptr_t)start) % HeapWordSize == 0) { \
((void (*)(type*, int, int))StubRoutines::_arrayof_##type##_fill)(start, v, 80); \
} else { \
continue; \
} \
} else { \
((void (*)(type*, int, int))StubRoutines::_##type##_fill)(start, v, 80); \
} \
for (int i = 0; i < 96; i++) { \
if (i < (8 + offset) || i >= (88 + offset)) { \
assert(s.body[i] == 1, "what?"); \
} else { \
assert(s.body[i] == 32, "what?"); \
} \
} \
} \
} \
} \
// This comment line intentionally terminates the TEST_FILL definition:
// the trailing '\' on the closing brace above would otherwise splice the
// following invocation into the macro body.
TEST_FILL(jbyte);
TEST_FILL(jshort);
TEST_FILL(jint);
#undef TEST_FILL
// Also exercise the C++ Copy:: routines used as arraycopy fallbacks.
#define TEST_COPYRTN(type) \
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic), sizeof(type)); \
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
TEST_COPYRTN(jbyte);
TEST_COPYRTN(jshort);
TEST_COPYRTN(jint);
TEST_COPYRTN(jlong);
#undef TEST_COPYRTN
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
#endif
}
// VM-init entry points: phase 1 must happen before universe::genesis,
// phase 2 after it (see the initialize1/initialize2 declarations in the header).
void stubRoutines_init1() { StubRoutines::initialize1(); }
void stubRoutines_init2() { StubRoutines::initialize2(); }
// Notifies the GC barrier set before oops are stored into 'dest'.
// 'dest_uninitialized' indicates the destination holds no valid oops yet.
// Used by the oop arraycopy fallbacks below.
static void gen_arraycopy_barrier_pre(oop* dest, size_t count, bool dest_uninitialized) {
  assert(count != 0, "count should be non-zero");
  assert(count <= (size_t)max_intx, "count too large");
  BarrierSet* const heap_bs = Universe::heap()->barrier_set();
  assert(heap_bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
  heap_bs->write_ref_array_pre(dest, (int)count, dest_uninitialized);
}
// Notifies the GC barrier set after oops have been stored into 'dest'.
// Used by the oop arraycopy fallbacks below.
static void gen_arraycopy_barrier(oop* dest, size_t count) {
  assert(count != 0, "count should be non-zero");
  BarrierSet* const heap_bs = Universe::heap()->barrier_set();
  assert(heap_bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  heap_bs->write_ref_array((HeapWord*)dest, count);
}
// C++ fallback implementations of the primitive-type arraycopy stubs.
// They are the initial values of the *_arraycopy entry points (see the
// CAST_FROM_FN_PTR initializers above).  'count' is in elements.  The
// '_atomic' Copy routines are presumably element-wise atomic -- see Copy.
JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
#endif // !PRODUCT
Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
#endif // !PRODUCT
Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
#endif // !PRODUCT
Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jlong_array_copy_ctr++; // Slow-path long/double array copy
#endif // !PRODUCT
Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
// Oop arraycopy fallbacks: like the primitive versions, but the copy is
// bracketed by GC pre/post barriers.  The '_uninit' variant tells the
// pre-barrier that the destination holds no valid oops yet.
JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/false);
Copy::conjoint_oops_atomic(src, dest, count);
gen_arraycopy_barrier(dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/true);
Copy::conjoint_oops_atomic(src, dest, count);
gen_arraycopy_barrier(dest, count);
JRT_END
// 'arrayof' fallbacks: src/dest are HeapWord-aligned (cf. COPYFUNC_ALIGNED),
// so the word-oriented arrayof_conjoint Copy routines can be used.
JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
#endif // !PRODUCT
Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
#endif // !PRODUCT
Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
#endif // !PRODUCT
Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jlong_array_copy_ctr++; // Slow-path long/double array copy
#endif // !PRODUCT
Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
// 'arrayof' oop fallbacks with GC barriers; see oop_copy/oop_copy_uninit above.
JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/false);
Copy::arrayof_conjoint_oops(src, dest, count);
gen_arraycopy_barrier((oop *) dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/true);
Copy::arrayof_conjoint_oops(src, dest, count);
gen_arraycopy_barrier((oop *) dest, count);
JRT_END
// Picks the fill stub matching the element type and alignment, reporting
// the chosen stub's name through 'name'.  Element types with no fill stub
// (longs, doubles, oops, ...) yield NULL and leave 'name' untouched.
address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    if (aligned) {
      name = "arrayof_jbyte_fill";
      return arrayof_jbyte_fill();
    }
    name = "jbyte_fill";
    return jbyte_fill();
  case T_CHAR:
  case T_SHORT:
    if (aligned) {
      name = "arrayof_jshort_fill";
      return arrayof_jshort_fill();
    }
    name = "jshort_fill";
    return jshort_fill();
  case T_INT:
  case T_FLOAT:
    if (aligned) {
      name = "arrayof_jint_fill";
      return arrayof_jint_fill();
    }
    name = "jint_fill";
    return jint_fill();
  case T_DOUBLE:
  case T_LONG:
  case T_ARRAY:
  case T_OBJECT:
  case T_NARROWOOP:
  case T_NARROWKLASS:
  case T_ADDRESS:
    // No fill stubs exist for these element types.
    return NULL;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}
// Selector bits combined (summed) in select_arraycopy_function to pick one
// of the four variants per element type.
enum {
COPYFUNC_UNALIGNED = 0,
COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize
COPYFUNC_CONJOINT = 0,
COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend
};
// Maps (element type, alignment, disjointness) to the matching arraycopy
// stub and reports the stub's name through 'name'.  For oop arrays the
// result additionally depends on 'dest_uninitialized' (selects the '_uninit'
// stub variant).
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
// Build a 2-bit selector from the flags (see the COPYFUNC_* enum above).
int selector =
(aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) +
(disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
#define RETURN_STUB(xxx_arraycopy) { \
name = #xxx_arraycopy; \
return StubRoutines::xxx_arraycopy(); }
#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
name = #xxx_arraycopy; \
return StubRoutines::xxx_arraycopy(parm); }
// Each inner switch covers all four selector values (0..3) and always
// returns, so control never falls through into the next type's case.
switch (t) {
case T_BYTE:
case T_BOOLEAN:
switch (selector) {
case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy);
case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
}
case T_CHAR:
case T_SHORT:
switch (selector) {
case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy);
case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
}
case T_INT:
case T_FLOAT:
switch (selector) {
case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy);
case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy);
}
case T_DOUBLE:
case T_LONG:
switch (selector) {
case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy);
case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy);
case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
}
case T_ARRAY:
case T_OBJECT:
switch (selector) {
case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
}
default:
ShouldNotReachHere();
return NULL;
}
#undef RETURN_STUB
#undef RETURN_STUB_PARM
}
// ==== file: C:\hotspot-69087d08d473\src\share\vm/runtime/stubRoutines.hpp ====
#ifndef SHARE_VM_RUNTIME_STUBROUTINES_HPP
#define SHARE_VM_RUNTIME_STUBROUTINES_HPP
#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "runtime/frame.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
class StubRoutines: AllStatic {
public:
enum platform_independent_constants {
max_size_of_parameters = 256 // max. parameter size supported by megamorphic lookups
};
friend class StubGenerator;
#if defined STUBROUTINES_MD_HPP
# include STUBROUTINES_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "stubRoutines_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "stubRoutines_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "stubRoutines_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "stubRoutines_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "stubRoutines_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "stubRoutines_ppc_64.hpp"
#endif
static jint _verify_oop_count;
static address _verify_oop_subroutine_entry;
static address _call_stub_return_address; // the return PC, when returning to a call stub
static address _call_stub_entry;
static address _forward_exception_entry;
static address _catch_exception_entry;
static address _throw_AbstractMethodError_entry;
static address _throw_IncompatibleClassChangeError_entry;
static address _throw_NullPointerException_at_call_entry;
static address _throw_StackOverflowError_entry;
static address _handler_for_unsafe_access_entry;
static address _atomic_xchg_entry;
static address _atomic_xchg_ptr_entry;
static address _atomic_store_entry;
static address _atomic_store_ptr_entry;
static address _atomic_cmpxchg_entry;
static address _atomic_cmpxchg_ptr_entry;
static address _atomic_cmpxchg_long_entry;
static address _atomic_add_entry;
static address _atomic_add_ptr_entry;
static address _fence_entry;
static address _d2i_wrapper;
static address _d2l_wrapper;
static jint _fpu_cntrl_wrd_std;
static jint _fpu_cntrl_wrd_24;
static jint _fpu_cntrl_wrd_64;
static jint _fpu_cntrl_wrd_trunc;
static jint _mxcsr_std;
static jint _fpu_subnormal_bias1[3];
static jint _fpu_subnormal_bias2[3];
static BufferBlob* _code1; // code buffer for initial routines
static BufferBlob* _code2; // code buffer for all other routines
static address _jbyte_arraycopy;
static address _jshort_arraycopy;
static address _jint_arraycopy;
static address _jlong_arraycopy;
static address _oop_arraycopy, _oop_arraycopy_uninit;
static address _jbyte_disjoint_arraycopy;
static address _jshort_disjoint_arraycopy;
static address _jint_disjoint_arraycopy;
static address _jlong_disjoint_arraycopy;
static address _oop_disjoint_arraycopy, _oop_disjoint_arraycopy_uninit;
static address _arrayof_jbyte_arraycopy;
static address _arrayof_jshort_arraycopy;
static address _arrayof_jint_arraycopy;
static address _arrayof_jlong_arraycopy;
static address _arrayof_oop_arraycopy, _arrayof_oop_arraycopy_uninit;
static address _arrayof_jbyte_disjoint_arraycopy;
static address _arrayof_jshort_disjoint_arraycopy;
static address _arrayof_jint_disjoint_arraycopy;
static address _arrayof_jlong_disjoint_arraycopy;
static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit;
static address _checkcast_arraycopy, _checkcast_arraycopy_uninit;
static address _unsafe_arraycopy;
static address _generic_arraycopy;
static address _jbyte_fill;
static address _jshort_fill;
static address _jint_fill;
static address _arrayof_jbyte_fill;
static address _arrayof_jshort_fill;
static address _arrayof_jint_fill;
static address _zero_aligned_words;
static address _aescrypt_encryptBlock;
static address _aescrypt_decryptBlock;
static address _cipherBlockChaining_encryptAESCrypt;
static address _cipherBlockChaining_decryptAESCrypt;
static address _ghash_processBlocks;
static address _sha1_implCompress;
static address _sha1_implCompressMB;
static address _sha256_implCompress;
static address _sha256_implCompressMB;
static address _sha512_implCompress;
static address _sha512_implCompressMB;
static address _updateBytesCRC32;
static address _crc_table_adr;
static address _multiplyToLen;
static address _squareToLen;
static address _mulAdd;
static address _montgomeryMultiply;
static address _montgomerySquare;
static double (*_intrinsic_log)(double);
static double (*_intrinsic_log10)(double);
static double (*_intrinsic_exp)(double);
static double (*_intrinsic_pow)(double, double);
static double (*_intrinsic_sin)(double);
static double (*_intrinsic_cos)(double);
static double (*_intrinsic_tan)(double);
static address _safefetch32_entry;
static address _safefetch32_fault_pc;
static address _safefetch32_continuation_pc;
static address _safefetchN_entry;
static address _safefetchN_fault_pc;
static address _safefetchN_continuation_pc;
public:
static void initialize1(); // must happen before universe::genesis
static void initialize2(); // must happen after universe::genesis
static bool is_stub_code(address addr) { return contains(addr); }
static bool contains(address addr) {
return
(_code1 != NULL && _code1->blob_contains(addr)) ||
(_code2 != NULL && _code2->blob_contains(addr)) ;
}
static CodeBlob* code1() { return _code1; }
static CodeBlob* code2() { return _code2; }
static jint verify_oop_count() { return _verify_oop_count; }
static jint* verify_oop_count_addr() { return &_verify_oop_count; }
static address verify_oop_subroutine_entry_address() { return (address)&_verify_oop_subroutine_entry; }
static address catch_exception_entry() { return _catch_exception_entry; }
typedef void (*CallStub)(
address link,
intptr_t* result,
BasicType result_type,
Method* method,
address entry_point,
intptr_t* parameters,
int size_of_parameters,
TRAPS
);
static CallStub call_stub() { return CAST_TO_FN_PTR(CallStub, _call_stub_entry); }
static address forward_exception_entry() { return _forward_exception_entry; }
static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; }
static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
static address handler_for_unsafe_access() { return _handler_for_unsafe_access_entry; }
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; }
static address atomic_store_entry() { return _atomic_store_entry; }
static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; }
static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
static address atomic_cmpxchg_ptr_entry() { return _atomic_cmpxchg_ptr_entry; }
static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
static address atomic_add_entry() { return _atomic_add_entry; }
static address atomic_add_ptr_entry() { return _atomic_add_ptr_entry; }
static address fence_entry() { return _fence_entry; }
static address d2i_wrapper() { return _d2i_wrapper; }
static address d2l_wrapper() { return _d2l_wrapper; }
static jint fpu_cntrl_wrd_std() { return _fpu_cntrl_wrd_std; }
static address addr_fpu_cntrl_wrd_std() { return (address)&_fpu_cntrl_wrd_std; }
static address addr_fpu_cntrl_wrd_24() { return (address)&_fpu_cntrl_wrd_24; }
static address addr_fpu_cntrl_wrd_64() { return (address)&_fpu_cntrl_wrd_64; }
static address addr_fpu_cntrl_wrd_trunc() { return (address)&_fpu_cntrl_wrd_trunc; }
static address addr_mxcsr_std() { return (address)&_mxcsr_std; }
static address addr_fpu_subnormal_bias1() { return (address)&_fpu_subnormal_bias1; }
static address addr_fpu_subnormal_bias2() { return (address)&_fpu_subnormal_bias2; }
static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized);
static address jbyte_arraycopy() { return _jbyte_arraycopy; }
static address jshort_arraycopy() { return _jshort_arraycopy; }
static address jint_arraycopy() { return _jint_arraycopy; }
static address jlong_arraycopy() { return _jlong_arraycopy; }
static address oop_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy;
}
static address jbyte_disjoint_arraycopy() { return _jbyte_disjoint_arraycopy; }
static address jshort_disjoint_arraycopy() { return _jshort_disjoint_arraycopy; }
static address jint_disjoint_arraycopy() { return _jint_disjoint_arraycopy; }
static address jlong_disjoint_arraycopy() { return _jlong_disjoint_arraycopy; }
static address oop_disjoint_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy;
}
static address arrayof_jbyte_arraycopy() { return _arrayof_jbyte_arraycopy; }
static address arrayof_jshort_arraycopy() { return _arrayof_jshort_arraycopy; }
static address arrayof_jint_arraycopy() { return _arrayof_jint_arraycopy; }
static address arrayof_jlong_arraycopy() { return _arrayof_jlong_arraycopy; }
// Accessors for the generated arraycopy stub entry points. The "_uninit"
// variants skip the store barrier for destinations known to be uninitialized.
static address arrayof_oop_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy;
}
// Disjoint (non-overlapping) arraycopy stubs for element-aligned arrays.
static address arrayof_jbyte_disjoint_arraycopy() { return _arrayof_jbyte_disjoint_arraycopy; }
static address arrayof_jshort_disjoint_arraycopy() { return _arrayof_jshort_disjoint_arraycopy; }
static address arrayof_jint_disjoint_arraycopy() { return _arrayof_jint_disjoint_arraycopy; }
static address arrayof_jlong_disjoint_arraycopy() { return _arrayof_jlong_disjoint_arraycopy; }
static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy;
}
// Arraycopy with a per-element type check (for covariant oop array stores).
static address checkcast_arraycopy(bool dest_uninitialized = false) {
return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy;
}
static address unsafe_arraycopy() { return _unsafe_arraycopy; }
static address generic_arraycopy() { return _generic_arraycopy; }
// Array fill stubs (used e.g. by Arrays.fill intrinsics).
static address jbyte_fill() { return _jbyte_fill; }
static address jshort_fill() { return _jshort_fill; }
static address jint_fill() { return _jint_fill; }
static address arrayof_jbyte_fill() { return _arrayof_jbyte_fill; }
static address arrayof_jshort_fill() { return _arrayof_jshort_fill; }
static address arrayof_jint_fill() { return _arrayof_jint_fill; }
// Cryptographic and checksum intrinsic stubs (AES, GHASH, SHA, CRC32).
static address aescrypt_encryptBlock() { return _aescrypt_encryptBlock; }
static address aescrypt_decryptBlock() { return _aescrypt_decryptBlock; }
static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; }
static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; }
static address ghash_processBlocks() { return _ghash_processBlocks; }
static address sha1_implCompress() { return _sha1_implCompress; }
static address sha1_implCompressMB() { return _sha1_implCompressMB; }
static address sha256_implCompress() { return _sha256_implCompress; }
static address sha256_implCompressMB() { return _sha256_implCompressMB; }
static address sha512_implCompress() { return _sha512_implCompress; }
static address sha512_implCompressMB() { return _sha512_implCompressMB; }
static address updateBytesCRC32() { return _updateBytesCRC32; }
static address crc_table_addr() { return _crc_table_adr; }
// BigInteger intrinsic stubs.
static address multiplyToLen() {return _multiplyToLen; }
static address squareToLen() {return _squareToLen; }
static address mulAdd() {return _mulAdd; }
static address montgomeryMultiply() { return _montgomeryMultiply; }
static address montgomerySquare() { return _montgomerySquare; }
// Picks the fill stub for the given element type/alignment; sets 'name'.
static address select_fill_function(BasicType t, bool aligned, const char* &name);
static address zero_aligned_words() { return _zero_aligned_words; }
// Computes log(d) via the generated (or libm-backed) intrinsic stub.
static double intrinsic_log(double d) {
assert(_intrinsic_log != NULL, "must be defined");
return _intrinsic_log(d);
}
// Computes log10(d) via the generated (or libm-backed) intrinsic stub.
// Fix: the assert previously checked _intrinsic_log (copy-paste from
// intrinsic_log above), so a missing log10 stub would pass the assert
// and then call through a NULL function pointer.
static double intrinsic_log10(double d) {
  assert(_intrinsic_log10 != NULL, "must be defined");
  return _intrinsic_log10(d);
}
// Remaining math intrinsic stubs; each asserts its own stub pointer is set
// before dispatching through it.
static double intrinsic_exp(double d) {
assert(_intrinsic_exp != NULL, "must be defined");
return _intrinsic_exp(d);
}
static double intrinsic_pow(double d, double d2) {
assert(_intrinsic_pow != NULL, "must be defined");
return _intrinsic_pow(d, d2);
}
static double intrinsic_sin(double d) {
assert(_intrinsic_sin != NULL, "must be defined");
return _intrinsic_sin(d);
}
static double intrinsic_cos(double d) {
assert(_intrinsic_cos != NULL, "must be defined");
return _intrinsic_cos(d);
}
static double intrinsic_tan(double d) {
assert(_intrinsic_tan != NULL, "must be defined");
return _intrinsic_tan(d);
}
// SafeFetch: stubs that load from a possibly-invalid address and return
// 'errValue' instead of crashing if the load faults.
typedef int (*SafeFetch32Stub)(int* adr, int errValue);
typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue);
static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); }
static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); }
// True iff 'pc' is the known faulting instruction of either SafeFetch stub.
static bool is_safefetch_fault(address pc) {
return pc != NULL &&
(pc == _safefetch32_fault_pc ||
pc == _safefetchN_fault_pc);
}
// Given the pc of a SafeFetch fault, returns the stub's continuation pc
// (where the signal handler should resume). Caller must have already
// verified the pc with is_safefetch_fault().
static address continuation_for_safefetch_fault(address pc) {
assert(_safefetch32_continuation_pc != NULL &&
_safefetchN_continuation_pc != NULL,
"not initialized");
if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc;
if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc;
ShouldNotReachHere();
return NULL;
}
// Default (C++) implementations of the copy routines, used before the
// platform stubs are generated or as fallbacks. "_uninit" variants target
// uninitialized (barrier-free) destinations; "arrayof_" variants assume
// HeapWord alignment.
static void jbyte_copy (jbyte* src, jbyte* dest, size_t count);
static void jshort_copy (jshort* src, jshort* dest, size_t count);
static void jint_copy (jint* src, jint* dest, size_t count);
static void jlong_copy (jlong* src, jlong* dest, size_t count);
static void oop_copy (oop* src, oop* dest, size_t count);
static void oop_copy_uninit(oop* src, oop* dest, size_t count);
static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count);
static void arrayof_jshort_copy (HeapWord* src, HeapWord* dest, size_t count);
static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count);
static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count);
static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count);
static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
};
// Safely loads a 32-bit value from 'adr'; returns 'errValue' if the load
// faults. Requires the stub to have been generated already.
inline int SafeFetch32(int* adr, int errValue) {
assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
return StubRoutines::SafeFetch32_stub()(adr, errValue);
}
// Word-sized counterpart of SafeFetch32 (intptr_t load with fault fallback).
inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
return StubRoutines::SafeFetchN_stub()(adr, errValue);
}
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/sweeper.cpp
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/events.hpp"
#include "utilities/ticks.hpp"
#include "utilities/xmlstream.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#ifdef ASSERT
#define SWEEP(nm) record_sweep(nm, __LINE__)
// One entry of the (debug-only) sweeper trace ring buffer; records the
// sweeper's view of an nmethod at a SWEEP() call site.
class SweeperRecord {
 public:
  int traversal;        // sweep id (_traversals) when recorded
  int invocation;       // sweep fractions left at recording time
  int compile_id;
  long traversal_mark;  // nmethod's _stack_traversal_mark
  int state;            // nmethod state at recording time
  const char* kind;     // compile kind, may be NULL
  address vep;          // verified entry point
  address uep;          // (unverified) entry point
  int line;             // source line of the SWEEP() call
  void print() {
      // Fix: traversal_mark is a 'long' and was printed with "%d"; use "%ld"
      // so the value is read with the correct width (the mismatch was only
      // hidden by PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC).
      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                    traversal,
                    invocation,
                    compile_id,
                    kind == NULL ? "" : kind,
                    uep,
                    vep,
                    state,
                    traversal_mark,
                    line);
  }
};
// Debug-only sweeper trace state: next slot to write and the ring buffer
// itself (lazily allocated, SweeperLogEntries entries; NULL when disabled).
static int _sweep_index = 0;
static SweeperRecord* _records = NULL;
void NMethodSweeper::report_events(int id, address entry) {
if (_records != NULL) {
for (int i = _sweep_index; i < SweeperLogEntries; i++) {
if (_records[i].uep == entry ||
_records[i].vep == entry ||
_records[i].compile_id == id) {
_records[i].print();
}
}
for (int i = 0; i < _sweep_index; i++) {
if (_records[i].uep == entry ||
_records[i].vep == entry ||
_records[i].compile_id == id) {
_records[i].print();
}
}
}
}
void NMethodSweeper::report_events() {
if (_records != NULL) {
for (int i = _sweep_index; i < SweeperLogEntries; i++) {
if (_records[i].vep == NULL) continue;
_records[i].print();
}
for (int i = 0; i < _sweep_index; i++) {
if (_records[i].vep == NULL) continue;
_records[i].print();
}
}
}
// Appends a snapshot of 'nm' to the sweeper trace ring buffer (debug only).
// 'line' is the source line of the SWEEP() macro invocation.
void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records == NULL) {
    return;
  }
  SweeperRecord& rec = _records[_sweep_index];
  rec.traversal      = _traversals;
  rec.traversal_mark = nm->_stack_traversal_mark;
  rec.invocation     = _sweep_fractions_left;
  rec.compile_id     = nm->compile_id();
  rec.kind           = nm->compile_kind();
  rec.state          = nm->_state;
  rec.vep            = nm->verified_entry_point();
  rec.uep            = nm->entry_point();
  rec.line           = line;
  // Advance the write cursor, wrapping around the ring buffer.
  _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
}
#else
#define SWEEP(nm)
#endif
// Definitions of NMethodSweeper's static state (declared in sweeper.hpp).
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
int NMethodSweeper::_hotness_counter_reset_val = 0;
long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed
size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
// Applied to every nmethod found on thread stacks: resets its hotness
// counter and, if the nmethod is not-entrant, marks it as seen on stack so
// the sweeper will not zombify it while a frame still references it.
class MarkActivationClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
if (nm->is_not_entrant()) {
nm->mark_as_seen_on_stack();
}
}
}
};
static MarkActivationClosure mark_activation_closure;
// Like MarkActivationClosure but only resets the hotness counter; used while
// a sweep is already in progress (no stack-seen marking).
class SetHotnessClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
}
}
};
static SetHotnessClosure set_hotness_closure;
// Returns the (lazily computed) value hotness counters are reset to:
// 2 per MB of reserved code cache, with a floor of 1 for caches under 1 MB.
int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val != 0) {
    return _hotness_counter_reset_val;
  }
  _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  return _hotness_counter_reset_val;
}
// A sweep is in progress whenever the sweeper holds a cursor nmethod.
bool NMethodSweeper::sweep_in_progress() {
  return _current != NULL;
}
// Invoked at the end of each safepoint: advances the sweeper's virtual
// clock, starts a new sweep pass if none is in progress, and (re)marks
// nmethods found on thread stacks.
void NMethodSweeper::mark_active_nmethods() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
if (!MethodFlushing) {
return;
}
// One tick of virtual time per safepoint.
_time_counter++;
assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
if (!sweep_in_progress()) {
// Start a new pass: reset progress counters and position the cursor at
// the first nmethod in the code cache.
_seen = 0;
_sweep_fractions_left = NmethodSweepFraction;
_current = CodeCache::first_nmethod();
_traversals += 1;
_total_time_this_sweep = Tickspan();
if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals);
}
// New pass: also record which not-entrant nmethods are live on stacks.
Threads::nmethods_do(&mark_activation_closure);
} else {
// Pass already running: only refresh hotness counters.
Threads::nmethods_do(&set_hotness_closure);
}
// Publish the updates before leaving the safepoint.
OrderAccess::storestore();
}
// Called by compiler threads: runs one fraction of a code cache sweep if
// sweeping is enabled (or force-enables it when the cache is filling up).
// At most one thread sweeps at a time, guarded by _sweep_started.
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
return;
}
if (!_should_sweep) {
// Force a sweep if we waited too long (less free cache => shorter wait)
// or if compilation has been disabled (likely a full code cache).
const int time_since_last_sweep = _time_counter - _last_sweep;
const int max_wait_time = ReservedCodeCacheSize / (16 * M);
double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
_should_sweep = true;
}
}
if (_should_sweep && _sweep_fractions_left > 0) {
// Claim the sweeper; only one compiler thread may proceed.
jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
if (old != 0) {
return;
}
#ifdef ASSERT
// Lazily allocate the debug trace ring buffer on first use.
if (LogSweeper && _records == NULL) {
_records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
}
#endif
if (_sweep_fractions_left > 0) {
sweep_code_cache();
_sweep_fractions_left--;
}
if (_sweep_fractions_left == 0) {
// Pass complete: disable sweeping until enough state changes or time
// passes; possibly_enable_sweeper() may immediately re-enable it.
_total_nof_code_cache_sweeps++;
_last_sweep = _time_counter;
_should_sweep = false;
possibly_enable_sweeper();
if (_should_sweep) {
_bytes_changed = 0;
}
}
// Release the sweeper claim with release semantics.
OrderAccess::release_store((int*)&_sweep_started, 0);
}
}
// Fills in and commits a JFR SweepCodeCache event with the statistics of
// one completed sweep fraction. Caller checks should_commit() first.
static void post_sweep_event(EventSweepCodeCache* event,
const Ticks& start,
const Ticks& end,
s4 traversals,
int swept,
int flushed,
int zombified) {
assert(event != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_starttime(start);
event->set_endtime(end);
event->set_sweepId(traversals);
event->set_sweptCount(swept);
event->set_flushedCount(flushed);
event->set_zombifiedCount(zombified);
event->commit();
}
// Sweeps one fraction of the code cache: walks nmethods starting at the
// cached cursor, processing each (possibly flushing/zombifying it), while
// cooperating with safepoints. Updates timing and reclamation statistics.
void NMethodSweeper::sweep_code_cache() {
ResourceMark rm;
Ticks sweep_start_counter = Ticks::now();
_flushed_count = 0;
_zombified_count = 0;
_marked_for_reclamation_count = 0;
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
if (!CompileBroker::should_compile_new_jobs()) {
// Compilation is disabled (cache likely full): do the whole rest of the
// pass in this single invocation.
_sweep_fractions_left = 1;
}
// Number of nmethods to process in this fraction.
int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
int swept_count = 0;
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
int freed_memory = 0;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// On the last fraction keep going until the cursor runs off the end.
for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
swept_count++;
if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
}
// Drop the lock and block for the safepoint, then resume.
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
assert(Thread::current()->is_Java_thread(), "should be java thread");
JavaThread* thread = (JavaThread*)Thread::current();
ThreadBlockInVM tbivm(thread);
thread->java_suspend_self();
}
// Fetch the successor before processing: process_nmethod() may flush
// _current, invalidating its links.
nmethod* next = CodeCache::next_nmethod(_current);
{
// process_nmethod() must run without the CodeCache_lock held.
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
freed_memory += process_nmethod(_current);
}
_seen++;
_current = next;
}
}
assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
const Ticks sweep_end_counter = Ticks::now();
const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
_total_time_sweeping += sweep_time;
_total_time_this_sweep += sweep_time;
_peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
_total_flushed_size += freed_memory;
_total_nof_methods_reclaimed += _flushed_count;
EventSweepCodeCache event(UNTIMED);
if (event.should_commit()) {
post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, _flushed_count, _zombified_count);
}
#ifdef ASSERT
if(PrintMethodFlushing) {
tty->print_cr("### sweeper:      sweep time(%d): "
INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
}
#endif
if (_sweep_fractions_left == 1) {
_peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
log_sweep("finished");
}
// If we freed memory and compilation was off, turn it back on.
if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_sweep("restart_compiler");
}
}
// Called when an nmethod changes state: accounts its size toward the
// changed-bytes total and possibly re-enables sweeping.
void NMethodSweeper::report_state_change(nmethod* nm) {
_bytes_changed += nm->total_size();
possibly_enable_sweeper();
}
// Re-enables sweeping once nmethods totalling more than 1% of the reserved
// code cache have changed state since the last sweep.
void NMethodSweeper::possibly_enable_sweeper() {
  double changed_percent = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
  if (changed_percent > 1.0) {
    _should_sweep = true;
  }
}
// RAII guard: registers 'nm' as the compiler thread's scanned nmethod so it
// is kept alive while the sweeper inspects it; cleared on scope exit.
class NMethodMarker: public StackObj {
private:
CompilerThread* _thread;
public:
NMethodMarker(nmethod* nm) {
_thread = CompilerThread::current();
// Zombies/unloaded nmethods need no protection (and must not be marked).
if (!nm->is_zombie() && !nm->is_unloaded()) {
_thread->set_scanned_nmethod(nm);
}
}
~NMethodMarker() {
// NOTE: clears unconditionally, even if the constructor never set it.
_thread->set_scanned_nmethod(NULL);
}
};
// Flushes (deallocates) 'nm': first cleans up any inline-cache call sites
// that still point at it (under CompiledIC_lock), then removes it from the
// code cache (under CodeCache_lock).
void NMethodSweeper::release_nmethod(nmethod *nm) {
{
ResourceMark rm;
MutexLocker ml_patch(CompiledIC_lock);
RelocIterator iter(nm);
while (iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
}
}
}
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm->flush();
}
// Advances one nmethod through the reclamation state machine:
//   not-entrant -> zombie -> marked-for-reclamation -> flushed
// (unloaded nmethods go straight to zombie, or are flushed if OSR).
// Returns the number of bytes freed (non-zero only when 'nm' was flushed).
int NMethodSweeper::process_nmethod(nmethod *nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
int freed_memory = 0;
// Keep 'nm' protected while we work on it.
NMethodMarker nmm(nm);
SWEEP(nm);
if (nm->is_locked_by_vm()) {
// VM-locked nmethods must not change state; just tidy inline caches.
if (nm->is_alive()) {
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
return freed_memory;
}
if (nm->is_zombie()) {
if (nm->is_marked_for_reclamation()) {
// Second visit as zombie: actually flush it now.
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
freed_memory = nm->total_size();
if (nm->is_compiled_by_c2()) {
_total_nof_c2_methods_reclaimed++;
}
release_nmethod(nm);
_flushed_count++;
} else {
// First visit as zombie: defer flushing one sweep cycle.
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
_bytes_changed += nm->total_size();
_marked_for_reclamation_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
// Safe to zombify only once no activation remains on any stack.
if (nm->can_convert_to_zombie()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
MutexLocker cl(CompiledIC_lock);
nm->clear_ic_stubs();
nm->make_zombie();
_zombified_count++;
SWEEP(nm);
} else {
// Still (possibly) on a stack: just clean inline caches for now.
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
} else if (nm->is_unloaded()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
}
if (nm->is_osr_method()) {
SWEEP(nm);
// No inline caches can point to OSR methods, so flush directly.
freed_memory = nm->total_size();
if (nm->is_compiled_by_c2()) {
_total_nof_c2_methods_reclaimed++;
}
release_nmethod(nm);
_flushed_count++;
} else {
{
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
nm->make_zombie();
_zombified_count++;
SWEEP(nm);
}
} else {
// Alive and in use: optionally age it out via the hotness heuristic.
if (UseCodeCacheFlushing) {
if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
nm->dec_hotness_counter();
int reset_val = hotness_counter_reset_val();
int time_since_reset = reset_val - nm->hotness_counter();
// Threshold scales with code cache pressure (reverse free ratio).
double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
nm->make_not_entrant();
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
}
}
}
}
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
return freed_memory;
}
// Logs a sweeper message plus the current code cache state, to tty (when
// PrintMethodFlushing) and/or the compilation log (when LogCompilation).
// 'format' and the varargs are an optional printf-style suffix.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
if (PrintMethodFlushing) {
ResourceMark rm;
stringStream s;
CodeCache::log_state(&s);
ttyLocker ttyl;
tty->print("### sweeper: %s ", msg);
if (format != NULL) {
va_list ap;
va_start(ap, format);
tty->vprint(format, ap);
va_end(ap);
}
tty->print_cr("%s", s.as_string());
}
if (LogCompilation && (xtty != NULL)) {
ResourceMark rm;
stringStream s;
CodeCache::log_state(&s);
ttyLocker ttyl;
xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
if (format != NULL) {
// Restart the va_list for the second consumer.
va_list ap;
va_start(ap, format);
xtty->vprint(format, ap);
va_end(ap);
}
xtty->print("%s", s.as_string());
xtty->stamp();
xtty->end_elem();
}
}
// Prints accumulated sweeper statistics (time, sweep and flush counts,
// bytes flushed) to tty.
void NMethodSweeper::print() {
ttyLocker ttyl;
tty->print_cr("Code cache sweeper statistics:");
tty->print_cr("  Total sweep time:                %1.0lfms", (double)_total_time_sweeping.value()/1000000);
tty->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
tty->print_cr("  Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
_total_nof_c2_methods_reclaimed);
tty->print_cr("  Total size of flushed methods:   " SIZE_FORMAT "kB", _total_flushed_size/K);
}
C:\hotspot-69087d08d473\src\share\vm/runtime/sweeper.hpp
#ifndef SHARE_VM_RUNTIME_SWEEPER_HPP
#define SHARE_VM_RUNTIME_SWEEPER_HPP
#include "utilities/ticks.hpp"
// The NMethodSweeper walks the code cache incrementally (in fractions),
// reclaiming nmethods that are no longer used. All state is static.
class NMethodSweeper : public AllStatic {
static long      _traversals;                     // Stack scan count, also sweep ID.
static long      _total_nof_code_cache_sweeps;    // Total number of full sweeps of the code cache
static long      _time_counter;                   // Virtual time used to periodically invoke sweeper
static long      _last_sweep;                     // Value of _time_counter when the last sweep happened
static nmethod*  _current;                        // Current nmethod
static int       _seen;                           // Nof. nmethod we have currently processed in current pass of CodeCache
static int       _flushed_count;                  // Nof. nmethods flushed in current sweep
static int       _zombified_count;                // Nof. nmethods made zombie in current sweep
static int       _marked_for_reclamation_count;   // Nof. nmethods marked for reclaim in current sweep
static volatile int  _sweep_fractions_left;       // Nof. invocations left until we are completed with this pass
static volatile int  _sweep_started;              // Flag to control conc sweeper
static volatile bool _should_sweep;               // Indicates if we should invoke the sweeper
static volatile int _bytes_changed;               // Counts the total nmethod size if the nmethod changed from:
static long      _total_nof_methods_reclaimed;    // Accumulated nof methods flushed
static long      _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
static size_t    _total_flushed_size;             // Total size of flushed methods
static int       _hotness_counter_reset_val;      // Lazily computed; see hotness_counter_reset_val()
static Tickspan  _total_time_sweeping;            // Accumulated time sweeping
static Tickspan  _total_time_this_sweep;          // Total time this sweep
static Tickspan  _peak_sweep_time;                // Peak time for a full sweep
static Tickspan  _peak_sweep_fraction_time;       // Peak time sweeping one fraction
static int  process_nmethod(nmethod *nm);
static void release_nmethod(nmethod* nm);
static bool sweep_in_progress();
static void sweep_code_cache();
public:
static long traversal_count()              { return _traversals; }
// NOTE(review): _total_nof_methods_reclaimed is a long but is returned as
// int here, silently truncating on overflow — confirm callers only need int.
static int total_nof_methods_reclaimed()   { return _total_nof_methods_reclaimed; }
static const Tickspan total_time_sweeping()      { return _total_time_sweeping; }
static const Tickspan peak_sweep_time()          { return _peak_sweep_time; }
static const Tickspan peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
static void log_sweep(const char* msg, const char* format = NULL, ...) ATTRIBUTE_PRINTF(2, 3);
#ifdef ASSERT
static bool is_sweeping(nmethod* which) { return _current == which; }
static void record_sweep(nmethod* nm, int line);
static void report_events(int id, address entry);
static void report_events();
#endif
static void mark_active_nmethods();      // Invoked at the end of each safepoint
static void possibly_sweep();            // Compiler threads call this to sweep
static int hotness_counter_reset_val();
static void report_state_change(nmethod* nm);
static void possibly_enable_sweeper();
static void print();   // Printing/debugging
};
#endif // SHARE_VM_RUNTIME_SWEEPER_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/synchronizer.cpp
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
#if defined(__GNUC__) && !defined(PPC64)
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#ifdef DTRACE_ENABLED
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
char* bytes = NULL; \
int len = 0; \
jlong jtid = SharedRuntime::get_java_tid(thread); \
Symbol* klassname = ((oop)(obj))->klass()->name(); \
if (klassname != NULL) { \
bytes = (char*)klassname->bytes(); \
len = klassname->utf8_length(); \
}
#ifndef USDT2
HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
jlong, uintptr_t, char*, int);
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
{ \
if (DTraceMonitorProbes) { \
DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \
(monitor), bytes, len, (millis)); \
} \
}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
{ \
if (DTraceMonitorProbes) { \
DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \
(uintptr_t)(monitor), bytes, len); \
} \
}
#else /* USDT2 */
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \
{ \
if (DTraceMonitorProbes) { \
DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
HOTSPOT_MONITOR_WAIT(jtid, \
(uintptr_t)(monitor), bytes, len, (millis)); \
} \
}
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \
{ \
if (DTraceMonitorProbes) { \
DTRACE_MONITOR_PROBE_COMMON(obj, thread); \
HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \
(uintptr_t)(monitor), bytes, len); \
} \
}
#endif /* USDT2 */
#else // ndef DTRACE_ENABLED
#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
#endif // ndef DTRACE_ENABLED
// Out-of-line wrapper that fires the monitor-waited dtrace probe; always
// returns 0 (expands to nothing when dtrace is disabled).
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
return 0;
}
// Global monitor lists and the striped spin locks used while an object's
// header is in the transient INFLATING state.
#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
// Sentinel mark used to terminate monitor block chains.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
// Monitor enter fast path: revokes (and possibly rebiases) a biased header
// first, then falls through to slow_enter() for the actual lock acquisition.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
if (UseBiasedLocking) {
if (!SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
// Successfully rebias-locked to this thread; nothing more to do.
return;
}
} else {
// At a safepoint (VM thread): revoke only, never rebias.
assert(!attempt_rebias, "can not rebias toward VM thread");
BiasedLocking::revoke_at_safepoint(obj);
}
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
slow_enter (obj, lock, THREAD) ;
}
// Monitor exit fast path. A NULL displaced header means a recursive stack
// lock (nothing to undo). Otherwise try to CAS the displaced header back
// into the object; on failure the lock was inflated, so exit the monitor.
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
markOop dhw = lock->displaced_header();
markOop mark ;
if (dhw == NULL) {
// Recursive stack-lock exit: sanity-check the current owner then return.
mark = object->mark() ;
assert (!mark->is_neutral(), "invariant") ;
if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
}
if (mark->has_monitor()) {
ObjectMonitor * m = mark->monitor() ;
assert(((oop)(m->object()))->mark() == mark, "invariant") ;
assert(m->is_entered(THREAD), "invariant") ;
}
return ;
}
mark = object->mark() ;
if (mark == (markOop) lock) {
// Header still points at our BasicLock: restore the displaced header.
assert (dhw->is_neutral(), "invariant") ;
if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
TEVENT (fast_exit: release stacklock) ;
return;
}
}
// CAS failed or header changed: the lock was inflated; exit the monitor.
ObjectSynchronizer::inflate(THREAD,
object,
inflate_cause_vm_internal)->exit(true, THREAD);
}
// Monitor enter slow path: try a thin (stack) lock via CAS; record a
// recursive stack lock with a NULL displaced header; otherwise inflate to
// a full ObjectMonitor and enter it.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
markOop mark = obj->mark();
assert(!mark->has_bias_pattern(), "should not see bias pattern here");
if (mark->is_neutral()) {
// Unlocked: try to install a pointer to our BasicLock in the header.
lock->set_displaced_header(mark);
if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
TEVENT (slow_enter: release stacklock) ;
return ;
}
} else
if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
// Recursive stack lock: flagged by a NULL displaced header.
assert(lock != mark->locker(), "must not re-lock the same lock");
assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
lock->set_displaced_header(NULL);
return;
}
#if 0
if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
lock->set_displaced_header (NULL) ;
return ;
}
#endif
// Contended or already inflated: mark the BasicLock unused and go through
// the inflated monitor.
lock->set_displaced_header(markOopDesc::unused_mark());
ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_monitor_enter)->enter(THREAD);
}
// Exit path for locks taken via slow_enter(); fast_exit() already handles
// both the stack-locked and inflated cases, so simply delegate.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
// Fully exits the monitor regardless of recursion depth; returns the saved
// recursion count so reenter() can restore it (used by Object.wait paths).
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
TEVENT (complete_exit) ;
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_vm_internal);
return monitor->complete_exit(THREAD);
}
// Re-acquires the monitor and restores the recursion count saved by a prior
// complete_exit().
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
TEVENT (reenter) ;
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_vm_internal);
monitor->reenter(recursion, THREAD);
}
// JNI MonitorEnter: always uses the inflated monitor (no thin-lock path).
// The pending-monitor flag distinguishes JNI from Java-level contention.
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
TEVENT (jni_enter) ;
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
THREAD->set_current_pending_monitor_is_from_java(false);
ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
THREAD->set_current_pending_monitor_is_from_java(true);
}
// Non-blocking JNI monitor enter; returns whether the monitor was acquired.
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
return monitor->try_enter(THREAD);
}
// JNI MonitorExit: exits the inflated monitor, but only after check()
// verifies the current thread actually owns it (raises the required
// IllegalMonitorStateException otherwise).
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
TEVENT (jni_exit) ;
if (UseBiasedLocking) {
Handle h_obj(THREAD, obj);
BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
obj = h_obj();
}
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
obj,
inflate_cause_jni_exit);
if (monitor->check(THREAD)) {
monitor->exit(true, THREAD);
}
}
// RAII locker used by VM-internal code; acquires the lock in the
// constructor when doLock is true, releases it in the destructor.
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
_dolock = doLock;
_thread = thread;
debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
_obj = obj;
if (_dolock) {
TEVENT (ObjectLocker) ;
ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
}
}
// Releases the lock acquired in the constructor (if any).
ObjectLocker::~ObjectLocker() {
if (_dolock) {
ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
}
}
// Object.wait(millis): revokes bias, validates the timeout, inflates the
// lock, and performs an interruptible wait on the monitor.
// Throws IllegalArgumentException for a negative timeout.
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                      obj(),
                                                      inflate_cause_wait);
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);
  // Fix: the previous revision contained a dangling comment fragment here
  // ("... that's fixed we can uncomment the following line and remove the
  // call */") whose opening "/*" had been lost, leaving a bare "*/" that
  // does not compile. The fragment documented that the explicit
  // dtrace_waited_probe() call below is a temporary workaround.
  dtrace_waited_probe(monitor, obj, THREAD);
}
// Like wait(), but the wait cannot be interrupted (interruptible == false).
void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
TEVENT (wait - throw IAX) ;
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
}
ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_wait)->wait(millis, false, THREAD) ;
}
// Object.notify(): if the object is only stack-locked by this thread there
// can be no waiters, so return immediately; otherwise inflate and notify.
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(obj, false, THREAD);
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
markOop mark = obj->mark();
if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
return;
}
ObjectSynchronizer::inflate(THREAD,
obj(),
inflate_cause_notify)->notify(THREAD);
}
// Implements Object.notifyAll(): wake all threads waiting on obj's monitor.
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  const markOop hdr = obj->mark();
  // Fast path: a stack lock held by the calling thread was never inflated,
  // so it has no wait set -- nothing to wake.
  if (hdr->has_locker() && THREAD->is_lock_owned((address) hdr->locker())) {
    return;
  }
  // Slow path: inflate and delegate to the monitor.
  ObjectMonitor* mon = ObjectSynchronizer::inflate(THREAD,
                                                   obj(),
                                                   inflate_cause_notify);
  mon->notifyAll(THREAD);
}
// Process-wide synchronization state.  The double pad arrays space the
// volatile fields apart, presumably to limit false sharing between them
// (cache-line sizes are not visible here -- see sanity_checks below).
struct SharedGlobals {
double padPrefix [8];
volatile int stwRandom ;   // random value refreshed each deflation cycle
volatile int stwCycle ;    // stop-the-world deflation cycle counter
double padSuffix [16];
volatile int hcSequence ;  // sequence counter for the hashCode==3 policy
double padFinal [8] ;
} ;
// Singleton instance of the shared synchronization globals.
static SharedGlobals GVars ;
// NOTE(review): MonitorScavengeThreshold appears unused in this chunk --
// the induce check in omAlloc uses MonitorBound instead; verify elsewhere.
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
// Returns a stable mark word for 'obj', waiting out the transient
// INFLATING state another thread may have installed mid-inflation.
// Escalates politely: spin, then alternate yields with blocking on one
// of a small fixed set of shared inflation locks.
static markOop ReadStableMark (oop obj) {
markOop mark = obj->mark() ;
if (!mark->is_being_inflated()) {
return mark ; // normal fast-path return
}
int its = 0 ;
for (;;) {
markOop mark = obj->mark() ;
if (!mark->is_being_inflated()) {
return mark ; // normal fast-path return
}
++its ;
// After 10000 spins (or immediately on uniprocessors, where spinning
// cannot help) stop burning CPU and start yielding/blocking.
if (its > 10000 || !os::is_MP()) {
if (its & 1) {
os::NakedYield() ;
TEVENT (Inflate: INFLATING - yield) ;
} else {
// Hash the object onto one of NINFLATIONLOCKS mux locks and block
// there until the inflating thread installs the real mark.
int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
int YieldThenBlock = 0 ;
assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
while (obj->mark() == markOopDesc::INFLATING()) {
// Yield a few times before resorting to timed parking.
if ((YieldThenBlock++) >= 16) {
Thread::current()->_ParkEvent->park(1) ;
} else {
os::NakedYield() ;
}
}
Thread::muxRelease (InflationLocks + ix ) ;
TEVENT (Inflate: INFLATING - yield/park) ;
}
} else {
SpinPause() ; // SMP-polite spinning
}
}
}
// Generates a fresh identity-hash value for 'obj' on behalf of 'Self'.
// The global hashCode flag selects the policy; the result is masked to
// the mark word's hash field and never 0 (0 means "no hash").
static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  switch (hashCode) {
  case 0:
    value = os::random() ;               // global PRNG
    break ;
  case 1: {
    // Object address folded with the per-STW-cycle random value.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
    break ;
  }
  case 2:
    value = 1 ;                          // constant, for sensitivity testing
    break ;
  case 3:
    value = ++GVars.hcSequence ;         // global sequence counter
    break ;
  case 4:
    value = cast_from_oop<intptr_t>(obj) ;  // raw object address
    break ;
  default: {
    // Thread-local xor-shift state: no global contention, not
    // address-dependent.
    unsigned t = Self->_hashStateX ;
    t ^= (t << 11) ;
    Self->_hashStateX = Self->_hashStateY ;
    Self->_hashStateY = Self->_hashStateZ ;
    Self->_hashStateZ = Self->_hashStateW ;
    unsigned v = Self->_hashStateW ;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
    Self->_hashStateW = v ;
    value = v ;
    break ;
  }
  }
  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;        // remap 0 -- it denotes "no hash"
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
// Returns the identity hash code for 'obj', installing one if the object
// does not yet have it.  Depending on the lock state the hash lives in the
// object's own header, in the displaced header of a stack lock, or in the
// header cached inside an inflated ObjectMonitor.
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
if (UseBiasedLocking) {
// A biased header has no room for a hash -- revoke the bias first.
if (obj->mark()->has_bias_pattern()) {
Handle hobj (Self, obj) ;
assert (Universe::verify_in_progress() ||
!SafepointSynchronize::is_at_safepoint(),
"biases should not be seen by VM thread here");
BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
obj = hobj() ;
assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
}
assert (Universe::verify_in_progress() ||
!SafepointSynchronize::is_at_safepoint(), "invariant") ;
assert (Universe::verify_in_progress() ||
Self->is_Java_thread() , "invariant") ;
assert (Universe::verify_in_progress() ||
((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
ObjectMonitor* monitor = NULL;
markOop temp, test;
intptr_t hash;
markOop mark = ReadStableMark (obj);
assert (!mark->has_bias_pattern(), "invariant") ;
if (mark->is_neutral()) {
hash = mark->hash(); // this is a normal header
if (hash) { // if it has hash, just return it
return hash;
}
hash = get_next_hash(Self, obj); // allocate a new hash code
temp = mark->copy_set_hash(hash); // merge the hash code into header
// Publish via CAS; on failure (contention) fall through and inflate.
test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
if (test == mark) {
return hash;
}
} else if (mark->has_monitor()) {
// Already inflated: the hash, if any, is in the monitor's cached header.
monitor = mark->monitor();
temp = monitor->header();
assert (temp->is_neutral(), "invariant") ;
hash = temp->hash();
if (hash) {
return hash;
}
} else if (Self->is_lock_owned((address)mark->locker())) {
temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
assert (temp->is_neutral(), "invariant") ;
hash = temp->hash(); // by current thread, check if the displaced
if (hash) { // header contains hash code
return hash;
}
}
// Slow path: inflate to pin the header in the monitor, then install the
// hash into the monitor's cached header with a CAS.
monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
mark = monitor->header();
assert (mark->is_neutral(), "invariant") ;
hash = mark->hash();
if (hash == 0) {
hash = get_next_hash(Self, obj);
temp = mark->copy_set_hash(hash); // merge hash code into header
assert (temp->is_neutral(), "invariant") ;
test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
if (test != mark) {
// A racing thread installed its hash first -- use that one.
hash = test->hash();
assert (test->is_neutral(), "invariant") ;
assert (hash != 0, "Trivial unexpected object/monitor header usage.");
}
}
return hash;
}
// Convenience entry: identity hash of a handle on the current thread.
intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  Thread * current = Thread::current() ;
  return FastHashCode (current, obj()) ;
}
// Returns true iff 'thread' (which must be the current thread) holds the
// monitor of h_obj, whether stack-locked or inflated.
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
Handle h_obj) {
if (UseBiasedLocking) {
BiasedLocking::revoke_and_rebias(h_obj, false, thread);
assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
assert(thread == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
markOop mark = ReadStableMark (obj) ;
// Stack-locked case: check the lock record lives on this thread's stack.
if (mark->has_locker()) {
return thread->is_lock_owned((address)mark->locker());
}
// Inflated case: ask the monitor.
if (mark->has_monitor()) {
ObjectMonitor* monitor = mark->monitor();
return monitor->is_entered(thread) != 0 ;
}
assert(mark->is_neutral(), "sanity check");
return false;
}
// Classifies ownership of h_obj's monitor relative to 'self':
// owner_self, owner_other, or owner_none (unlocked).
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
assert (self->thread_state() != _thread_blocked , "invariant") ;
if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
BiasedLocking::revoke_and_rebias(h_obj, false, self);
assert(!h_obj->mark()->has_bias_pattern(),
"biases should be revoked by now");
}
assert(self == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
markOop mark = ReadStableMark (obj) ;
// CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
if (mark->has_locker()) {
return self->is_lock_owned((address)mark->locker()) ?
owner_self : owner_other;
}
// CASE: inflated.  The monitor's _owner is either a thread or a stack
// lock address from before inflation; check both interpretations.
if (mark->has_monitor()) {
void * owner = mark->monitor()->_owner ;
if (owner == NULL) return owner_none ;
return (owner == self ||
self->is_lock_owned((address)owner)) ? owner_self : owner_other;
}
assert(mark->is_neutral(), "sanity check");
return owner_none ; // it's unlocked
}
// Returns the JavaThread that owns h_obj's monitor, or NULL if unowned.
// doLock is forwarded to the Threads-list lookup.
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
if (UseBiasedLocking) {
// At a safepoint biases must be revoked in place; otherwise via the
// normal revoke-and-rebias path.
if (SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::revoke_at_safepoint(h_obj);
} else {
BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
}
assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
}
oop obj = h_obj();
address owner = NULL;
markOop mark = ReadStableMark (obj) ;
// Stack-locked: owner is identified by the stack address of the lock record.
if (mark->has_locker()) {
owner = (address) mark->locker();
}
// Inflated: owner recorded in the monitor.
if (mark->has_monitor()) {
ObjectMonitor* monitor = mark->monitor();
assert(monitor != NULL, "monitor should be non-null");
owner = (address) monitor->owner();
}
if (owner != NULL) {
// Map the owner cookie (thread or stack address) back to a JavaThread.
return Threads::owning_thread_from_monitor_owner(owner, doLock);
}
return NULL;
}
// Applies 'closure' to every monitor in the global block list that is
// currently associated with an object.  Slot 0 of each block is the
// CHAINMARKER header and is skipped.
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
ObjectMonitor* block =
(ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) {
ObjectMonitor* mid = (ObjectMonitor *)(block + i);
oop object = (oop)mid->object();
if (object != NULL) {
closure->do_monitor(mid);
}
}
block = (ObjectMonitor*)block->FreeNext;
}
}
// Advances to the next monitor block in the global block list, asserting
// the block-header invariant on both ends of the hop.
static inline ObjectMonitor* next(ObjectMonitor* block) {
assert(block->object() == CHAINMARKER, "must be a block header");
block = block->FreeNext ;
assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
return block;
}
// GC support: applies 'f' to the object slot of every in-use monitor.
// Must run at a safepoint so the block list is stable.
void ObjectSynchronizer::oops_do(OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
ObjectMonitor* block =
(ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
for (; block != NULL; block = (ObjectMonitor *)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header");
// Slot 0 is the block header; real monitors start at index 1.
for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = &block[i];
if (mid->object() != NULL) {
f->do_oop((oop*)mid->object_addr());
}
}
}
}
// Requests an asynchronous safepoint so idle monitors get deflated.
// The xchg guard ensures only one thread posts the VM operation per
// scavenge cycle; ForceMonitorScavenge is reset by the deflation pass.
static void InduceScavenge (Thread * Self, const char * Whence) {
if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
if (ObjectMonitor::Knob_Verbose) {
::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
::fflush(stdout) ;
}
// Posting the operation is asynchronous; the caller does not wait.
VMThread::execute (new VM_ForceAsyncSafepoint()) ;
if (ObjectMonitor::Knob_Verbose) {
::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
::fflush(stdout) ;
}
}
}
// Debug check: Self's cached omInUseCount / omFreeCount must match the
// actual lengths of its per-thread in-use and free monitor lists.
void ObjectSynchronizer::verifyInUse (Thread *Self) {
  int in_use_seen = 0;
  for (ObjectMonitor* cur = Self->omInUseList; cur != NULL; cur = cur->FreeNext) {
    in_use_seen++;
  }
  assert(in_use_seen == Self->omInUseCount, "inuse count off");

  int free_seen = 0;
  for (ObjectMonitor* cur = Self->omFreeList; cur != NULL; cur = cur->FreeNext) {
    free_seen++;
  }
  assert(free_seen == Self->omFreeCount, "free count off");
}
// Allocates an ObjectMonitor for 'Self'.  Three tiers:
//   1. pop from the thread-local free list (lock-free, common case);
//   2. refill the local list from the global free list under ListLock;
//   3. allocate a new block of _BLOCKSIZE monitors and publish it.
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
const int MAXPRIVATE = 1024 ;
for (;;) {
ObjectMonitor * m ;
// Tier 1: thread-local free list.
m = Self->omFreeList ;
if (m != NULL) {
Self->omFreeList = m->FreeNext ;
Self->omFreeCount -- ;
guarantee (m->object() == NULL, "invariant") ;
if (MonitorInUseLists) {
// Track the monitor on the per-thread in-use list for deflation.
m->FreeNext = Self->omInUseList;
Self->omInUseList = m;
Self->omInUseCount ++;
} else {
m->FreeNext = NULL;
}
return m ;
}
// Tier 2: move a provision-sized batch from the global free list to the
// local list, then retry tier 1.
if (gFreeList != NULL) {
Thread::muxAcquire (&ListLock, "omAlloc") ;
for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
MonitorFreeCount --;
ObjectMonitor * take = gFreeList ;
gFreeList = take->FreeNext ;
guarantee (take->object() == NULL, "invariant") ;
guarantee (!take->is_busy(), "invariant") ;
take->Recycle() ;
omRelease (Self, take, false) ;
}
Thread::muxRelease (&ListLock) ;
// Grow the provision 1.5x, capped at MAXPRIVATE.
Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
TEVENT (omFirst - reprovision) ;
// If the in-circulation population exceeds MonitorBound, trigger an
// asynchronous deflation pass.
const int mx = MonitorBound ;
if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
InduceScavenge (Self, "omAlloc") ;
}
continue;
}
// Tier 3: allocate a fresh block; slot 0 is the chain header, slots
// 1.._BLOCKSIZE-1 become free monitors.
assert (_BLOCKSIZE > 1, "invariant") ;
ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
if (temp == NULL) {
vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
"Allocate ObjectMonitors");
}
for (int i = 1; i < _BLOCKSIZE ; i++) {
temp[i].FreeNext = &temp[i+1];
}
temp[_BLOCKSIZE - 1].FreeNext = NULL ;
temp[0].set_object(CHAINMARKER);
Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
MonitorPopulation += _BLOCKSIZE-1;
MonitorFreeCount += _BLOCKSIZE-1;
// Publish the block with release semantics so lock-free readers of
// gBlockList see a fully initialized block.
temp[0].FreeNext = gBlockList;
OrderAccess::release_store_ptr(&gBlockList, temp);
temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
gFreeList = temp + 1;
Thread::muxRelease (&ListLock) ;
TEVENT (Allocate block of monitors) ;
}
}
// Returns monitor 'm' to Self's thread-local free list.  When the monitor
// came from the per-thread allocation path (fromPerThreadAlloc) it must
// first be unlinked from Self's in-use list.
void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
guarantee (m->object() == NULL, "invariant") ;
if (MonitorInUseLists && fromPerThreadAlloc) {
// Linear scan of the in-use list to find and unlink m.
ObjectMonitor* curmidinuse = NULL;
for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
if (m == mid) {
if (mid == Self->omInUseList) {
Self->omInUseList = mid->FreeNext;
} else if (curmidinuse != NULL) {
curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
}
Self->omInUseCount --;
break;
} else {
curmidinuse = mid;
mid = mid->FreeNext;
}
}
}
// Push onto the local free list (LIFO).
m->FreeNext = Self->omFreeList ;
Self->omFreeList = m ;
Self->omFreeCount ++ ;
}
// Called on thread exit: migrates Self's private free and in-use monitor
// lists onto the corresponding global lists so they remain scannable and
// reusable after the thread is gone.
void ObjectSynchronizer::omFlush (Thread * Self) {
ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
Self->omFreeList = NULL ;
ObjectMonitor * Tail = NULL ;
int Tally = 0;
if (List != NULL) {
ObjectMonitor * s ;
// Walk once to find the tail, count entries, and sanity-check them.
for (s = List ; s != NULL ; s = s->FreeNext) {
Tally ++ ;
Tail = s ;
guarantee (s->object() == NULL, "invariant") ;
guarantee (!s->is_busy(), "invariant") ;
s->set_owner (NULL) ; // redundant but good hygiene
TEVENT (omFlush - Move one) ;
}
guarantee (Tail != NULL && List != NULL, "invariant") ;
}
// Same for the in-use list.
ObjectMonitor * InUseList = Self->omInUseList;
ObjectMonitor * InUseTail = NULL ;
int InUseTally = 0;
if (InUseList != NULL) {
Self->omInUseList = NULL;
ObjectMonitor *curom;
for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
InUseTail = curom;
InUseTally++;
}
assert(Self->omInUseCount == InUseTally, "inuse count off");
Self->omInUseCount = 0;
guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
}
// Splice both lists onto the global lists under ListLock.
Thread::muxAcquire (&ListLock, "omFlush") ;
if (Tail != NULL) {
Tail->FreeNext = gFreeList ;
gFreeList = List ;
MonitorFreeCount += Tally;
}
if (InUseTail != NULL) {
InUseTail->FreeNext = gOmInUseList;
gOmInUseList = InUseList;
gOmInUseCount += InUseTally;
}
Thread::muxRelease (&ListLock) ;
TEVENT (omFlush) ;
}
// Maps an InflateCause enum value to a human-readable name (used by the
// JavaMonitorInflate event / diagnostics).
const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
switch (cause) {
case inflate_cause_vm_internal: return "VM Internal";
case inflate_cause_monitor_enter: return "Monitor Enter";
case inflate_cause_wait: return "Monitor Wait";
case inflate_cause_notify: return "Monitor Notify";
case inflate_cause_hash_code: return "Monitor Hash Code";
case inflate_cause_jni_enter: return "JNI Monitor Enter";
case inflate_cause_jni_exit: return "JNI Monitor Exit";
default:
ShouldNotReachHere();
}
// Unreachable in debug builds; keeps release builds returning a value.
return "Unknown";
}
// Fills in and commits a JavaMonitorInflate event for 'obj'.  The caller
// must have already checked event->should_commit().
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
const oop obj,
const ObjectSynchronizer::InflateCause cause) {
assert(event != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_monitorClass(obj->klass());
event->set_address((uintptr_t)(void*)obj);
event->set_cause((u1)cause);
event->commit();
}
// Returns obj's ObjectMonitor, inflating on the current thread if the
// mark does not already point at one.
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
markOop mark = obj->mark();
if (mark->has_monitor()) {
// Fast path: already inflated.
assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
return mark->monitor();
}
return ObjectSynchronizer::inflate(Thread::current(),
obj,
inflate_cause_vm_internal);
}
// Inflates the lock of 'object' into a full ObjectMonitor and returns it.
// Loops until either an existing monitor is found or this thread wins the
// race to install one.  Three cases per iteration:
//   - mark already points at a monitor: return it;
//   - mark is INFLATING: another thread is mid-inflation, wait it out;
//   - mark is stack-locked or neutral: try to install our own monitor.
ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self,
oop object,
const InflateCause cause) {
assert (Universe::verify_in_progress() ||
!SafepointSynchronize::is_at_safepoint(), "invariant") ;
EventJavaMonitorInflate event;
for (;;) {
const markOop mark = object->mark() ;
assert (!mark->has_bias_pattern(), "invariant") ;
// CASE: already inflated.
if (mark->has_monitor()) {
ObjectMonitor * inf = mark->monitor() ;
assert (inf->header()->is_neutral(), "invariant");
assert (inf->object() == object, "invariant") ;
assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
return inf ;
}
// CASE: inflation in progress by another thread -- wait and retry.
if (mark == markOopDesc::INFLATING()) {
TEVENT (Inflate: spin while INFLATING) ;
ReadStableMark(object) ;
continue ;
}
// CASE: stack-locked.  CAS the mark to INFLATING first, so the
// displaced header can be copied into the monitor without racing the
// lock owner; then publish the monitor-encoded mark.
if (mark->has_locker()) {
ObjectMonitor * m = omAlloc (Self) ;
m->Recycle();
m->_Responsible = NULL ;
m->OwnerIsThread = 0 ;
m->_recursions = 0 ;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class
markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
if (cmp != mark) {
omRelease (Self, m, true) ;
continue ; // Interference -- just retry
}
markOop dmw = mark->displaced_mark_helper() ;
assert (dmw->is_neutral(), "invariant") ;
m->set_header(dmw) ;
// Owner is recorded as the stack-lock address for now; it is fixed up
// lazily (OwnerIsThread == 0).
m->set_owner(mark->locker());
m->set_object(object);
guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
// Release-store ends the INFLATING window and publishes the monitor.
object->release_set_mark(markOopDesc::encode(m));
if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
TEVENT(Inflate: overwrite stacklock) ;
if (TraceMonitorInflation) {
if (object->is_instance()) {
ResourceMark rm;
tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
(void *) object, (intptr_t) object->mark(),
object->klass()->external_name());
}
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
}
return m ;
}
// CASE: neutral mark.  Prepare an unowned monitor and publish it with a
// single CAS; no INFLATING window is needed since there is no displaced
// header to protect.
assert (mark->is_neutral(), "invariant");
ObjectMonitor * m = omAlloc (Self) ;
m->Recycle();
m->set_header(mark);
m->set_owner(NULL);
m->set_object(object);
m->OwnerIsThread = 1 ;
m->_recursions = 0 ;
m->_Responsible = NULL ;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
// Lost the race: scrub the monitor and retry from the top.
m->set_object (NULL) ;
m->set_owner (NULL) ;
m->OwnerIsThread = 0 ;
m->Recycle() ;
omRelease (Self, m, true) ;
m = NULL ;
continue ;
}
if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
TEVENT(Inflate: overwrite neutral) ;
if (TraceMonitorInflation) {
if (object->is_instance()) {
ResourceMark rm;
tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
(void *) object, (intptr_t) object->mark(),
object->klass()->external_name());
}
}
if (event.should_commit()) {
post_monitor_inflate_event(&event, object, cause);
}
return m ;
}
}
// Tuning constants for the deflation pass.
enum ManifestConstants {
ClearResponsibleAtSTW = 0,     // clear _Responsible on busy monitors at STW
MaximumRecheckInterval = 1000
} ;
// Attempts to deflate a single in-use monitor.  If 'mid' is idle, the
// object's header is restored from the monitor, the monitor is cleared,
// and it is appended to the [FreeHeadp, FreeTailp] scavenge list.
// Returns true iff the monitor was deflated.  Runs at a safepoint.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  bool deflated;
  // The object and monitor must point at each other.
  guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
  guarantee (mid == obj->mark()->monitor(), "invariant");
  guarantee (mid->header()->is_neutral(), "invariant");
  if (mid->is_busy()) {
    // Monitor is owned or has waiters -- cannot deflate.
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
    deflated = false;
  } else {
    TEVENT (deflate_idle_monitors - scavenge1) ;
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }
    // Restore the cached header into the object and detach the monitor.
    obj->release_set_mark(mid->header());
    mid->clear();
    assert (mid->object() == NULL, "invariant") ;
    // Append mid to the scavenge list.
    if (*FreeHeadp == NULL) *FreeHeadp = mid;
    if (*FreeTailp != NULL) {
      ObjectMonitor * prevtail = *FreeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
      prevtail->FreeNext = mid;
    }
    // BUG FIX: advance the tail.  Without this the tail never moves past
    // NULL, subsequent deflated monitors are never chained, and the
    // caller's guarantee(FreeTail != NULL && nScavenged > 0) fires.
    *FreeTailp = mid;
    deflated = true;
  }
  return deflated;
}
// Walks an in-use monitor list, deflating idle monitors onto the
// [FreeHeadp, FreeTailp] scavenge list and unlinking them from
// *listheadp.  Returns the number of monitors deflated.
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;   // trailing pointer for unlinking
  int deflatedcount = 0;
  for (mid = *listheadp; mid != NULL; ) {
    oop obj = (oop) mid->object();
    bool deflated = false;
    if (obj != NULL) {
      deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
    }
    if (deflated) {
      // Unlink mid from the in-use list.
      if (mid == *listheadp) {
        // BUG FIX: the head case previously fell through without
        // unlinking, leaving a deflated monitor at the list head.
        *listheadp = mid->FreeNext;
      } else if (curmidinuse != NULL) {
        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
      mid = next;
      deflatedcount++;
    } else {
      curmidinuse = mid;
      mid = mid->FreeNext;
    }
  }
  return deflatedcount;
}
// Safepoint operation: scans all in-circulation monitors, deflates the
// idle ones, and returns them to the global free list in one splice.
void ObjectSynchronizer::deflate_idle_monitors() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
int nInuse = 0 ; // currently associated with objects
int nInCirculation = 0 ; // extant
int nScavenged = 0 ; // reclaimed
bool deflated = false;
ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
ObjectMonitor * FreeTail = NULL ;
TEVENT (deflate_idle_monitors) ;
Thread::muxAcquire (&ListLock, "scavenge - return") ;
if (MonitorInUseLists) {
// Walk the per-thread in-use lists, then the global moribund list.
int inUse = 0;
for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
nInCirculation+= cur->omInUseCount;
int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
cur->omInUseCount-= deflatedcount;
nScavenged += deflatedcount;
nInuse += cur->omInUseCount;
}
if (gOmInUseList) {
nInCirculation += gOmInUseCount;
int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
gOmInUseCount-= deflatedcount;
nScavenged += deflatedcount;
nInuse += gOmInUseCount;
}
} else {
// No in-use lists: sweep every slot of every block (slot 0 is the
// CHAINMARKER block header and is skipped).
ObjectMonitor* block =
(ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
for (; block != NULL; block = (ObjectMonitor*)next(block)) {
assert(block->object() == CHAINMARKER, "must be a block header");
nInCirculation += _BLOCKSIZE;
for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = (ObjectMonitor*)&block[i];
oop obj = (oop)mid->object();
if (obj == NULL) {
// Already on a free list -- must not be busy.
guarantee(!mid->is_busy(), "invariant");
continue;
}
deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
if (deflated) {
mid->FreeNext = NULL;
nScavenged++;
} else {
nInuse++;
}
}
}
}
MonitorFreeCount += nScavenged;
if (ObjectMonitor::Knob_Verbose) {
::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
MonitorPopulation, MonitorFreeCount) ;
::fflush(stdout) ;
}
ForceMonitorScavenge = 0; // Reset
// Splice the scavenged monitors onto the global free list in one step.
if (FreeHead != NULL) {
guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
assert (FreeTail->FreeNext == NULL, "invariant") ;
FreeTail->FreeNext = gFreeList ;
gFreeList = FreeHead ;
}
Thread::muxRelease (&ListLock) ;
if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
// Refresh the per-cycle shared state consumed by get_next_hash et al.
GVars.stwRandom = os::random() ;
GVars.stwCycle ++ ;
}
// Closure that force-exits every monitor owned by the given thread; used
// when a thread is torn down while still holding Java monitors.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
private:
TRAPS;
public:
ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
void do_monitor(ObjectMonitor* mid) {
if (mid->owner() == THREAD) {
(void)mid->complete_exit(CHECK);
}
}
};
// Releases every monitor still owned by the exiting current thread by
// iterating all in-use monitors under ListLock.
void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
assert(THREAD == JavaThread::current(), "must be current Java thread");
No_Safepoint_Verifier nsv ;
ReleaseJavaMonitorsClosure rjmc(THREAD);
Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
ObjectSynchronizer::monitors_iterate(&rjmc);
Thread::muxRelease(&ListLock);
// complete_exit may have recorded a pending exception; discard it.
THREAD->clear_pending_exception();
}
// Verifies that SharedGlobals' padding actually separates stwRandom and
// hcSequence by at least a cache line; emits warnings (not errors) when
// the layout permits false sharing.
void ObjectSynchronizer::sanity_checks(const bool verbose,
const uint cache_line_size,
int *error_cnt_ptr,
int *warning_cnt_ptr) {
u_char *addr_begin = (u_char*)&GVars;
u_char *addr_stwRandom = (u_char*)&GVars.stwRandom;
u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
if (verbose) {
tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
sizeof(SharedGlobals));
}
uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
if (verbose) {
tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
}
// cache_line_size == 0 means the platform could not report it; skip.
if (cache_line_size != 0) {
if (offset_stwRandom < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
"to the struct beginning than a cache line which permits "
"false sharing.");
(*warning_cnt_ptr)++;
}
if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
"SharedGlobals.hcSequence fields are closer than a cache "
"line which permits false sharing.");
(*warning_cnt_ptr)++;
}
if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
"to the struct end than a cache line which permits false "
"sharing.");
(*warning_cnt_ptr)++;
}
}
}
#ifndef PRODUCT
// Debug-only: runs ObjectMonitor::verify() on every in-use monitor in the
// global block list (slot 0 of each block is the CHAINMARKER header).
void ObjectSynchronizer::verify() {
ObjectMonitor* block =
(ObjectMonitor *)OrderAccess::load_ptr_acquire(&gBlockList);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = 1; i < _BLOCKSIZE; i++) {
ObjectMonitor* mid = (ObjectMonitor *)(block + i);
oop object = (oop)mid->object();
if (object != NULL) {
mid->verify();
}
}
block = (ObjectMonitor*) block->FreeNext;
}
}
// Debug-only: returns 1 iff 'monitor' lies inside (and is aligned within)
// one of the globally allocated monitor blocks, 0 otherwise.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
ObjectMonitor* block =
(ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
// Strictly inside the block: slot 0 is the header, so '>' not '>='.
if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
address mon = (address)monitor;
address blk = (address)block;
size_t diff = mon - blk;
assert((diff % sizeof(ObjectMonitor)) == 0, "must be aligned");
return 1;
}
block = (ObjectMonitor*)block->FreeNext;
}
return 0;
}
#endif
C:\hotspot-69087d08d473\src\share\vm/runtime/synchronizer.hpp
#ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "utilities/top.hpp"
class ObjectMonitor;
// Static facade for Java object locking: enter/exit, wait/notify,
// identity hash codes, monitor inflation/deflation, and the global
// ObjectMonitor pool.
class ObjectSynchronizer : AllStatic {
friend class VMStructs;
public:
// Result of query_lock_ownership.
typedef enum {
owner_self,
owner_none,
owner_other
} LockOwnership;
// Why a lock was inflated (reported via the JavaMonitorInflate event).
typedef enum {
inflate_cause_vm_internal = 0,
inflate_cause_monitor_enter = 1,
inflate_cause_wait = 2,
inflate_cause_notify = 3,
inflate_cause_hash_code = 4,
inflate_cause_jni_enter = 5,
inflate_cause_jni_exit = 6,
inflate_cause_nof = 7 // Number of causes
} InflateCause;
// monitorenter/monitorexit support (fast paths may use biased locking).
static void fast_enter (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
static void fast_exit (oop obj, BasicLock* lock, Thread* THREAD);
static void slow_enter (Handle obj, BasicLock* lock, TRAPS);
static void slow_exit (oop obj, BasicLock* lock, Thread* THREAD);
// JNI MonitorEnter/MonitorExit support.
static void jni_enter (Handle obj, TRAPS);
static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
static void jni_exit (oop obj, Thread* THREAD);
// Object.wait/notify support.
static void wait (Handle obj, jlong millis, TRAPS);
static void notify (Handle obj, TRAPS);
static void notifyall (Handle obj, TRAPS);
static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;
// Full exit/re-enter of a possibly-recursive lock (used by wait et al.).
static intptr_t complete_exit (Handle obj, TRAPS);
static void reenter (Handle obj, intptr_t recursion, TRAPS);
// ObjectMonitor pool management.
static ObjectMonitor * omAlloc (Thread * Self) ;
static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
static void omFlush (Thread * Self) ;
// Inflation of a stack lock / neutral header into an ObjectMonitor.
static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause);
static ObjectMonitor* inflate_helper(oop obj);
static const char* inflate_cause_name(const InflateCause cause);
// Identity hash codes.
static intptr_t identity_hash_value_for(Handle obj);
static intptr_t FastHashCode (Thread * Self, oop obj) ;
// Lock-ownership queries (debugging / JVMTI support).
static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
static void release_monitors_owned_by_thread(TRAPS);
static void monitors_iterate(MonitorClosure* m);
// Idle-monitor deflation (runs at safepoints).
static void deflate_idle_monitors();
static int walk_monitor_list(ObjectMonitor** listheadp,
ObjectMonitor** FreeHeadp,
ObjectMonitor** FreeTailp);
static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
ObjectMonitor** FreeTailp);
// GC and debugging support.
static void oops_do(OopClosure* f);
static void sanity_checks(const bool verbose,
const unsigned int cache_line_size,
int *error_cnt_ptr, int *warning_cnt_ptr);
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
private:
enum { _BLOCKSIZE = 128 };   // monitors per allocated block (slot 0 = header)
static ObjectMonitor * volatile gBlockList;   // all allocated monitor blocks
static ObjectMonitor * volatile gFreeList;    // global free list
static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
static int gOmInUseCount;
};
// RAII helper: locks a Java object in the constructor and unlocks it in
// the destructor; also exposes wait/notify on the held monitor.
class ObjectLocker : public StackObj {
private:
Thread* _thread;
Handle _obj;
BasicLock _lock;   // lock record used for the fast-enter path
bool _dolock; // default true
public:
ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
~ObjectLocker();
// Monitor behavior on the locked object.
void wait (TRAPS) { ObjectSynchronizer::wait (_obj, 0, CHECK); } // wait forever
void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);}
// Fully exit (returning the recursion count) and later re-enter.
intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};
#endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/task.cpp
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/init.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// Static PeriodicTask bookkeeping: the enrolled-task table and its size.
int PeriodicTask::_num_tasks = 0;
PeriodicTask* PeriodicTask::_tasks[PeriodicTask::max_tasks];
#ifndef PRODUCT
elapsedTimer PeriodicTask::_timer;                                 // time between ticks
int PeriodicTask::_intervalHistogram[PeriodicTask::max_interval];  // tick-spacing histogram
int PeriodicTask::_ticks;                                          // total ticks observed
// Debug aid: dumps the histogram of observed tick intervals (only when
// the ProfilerCheckIntervals flag is set).
void PeriodicTask::print_intervals() {
if (ProfilerCheckIntervals) {
for (int i = 0; i < PeriodicTask::max_interval; i++) {
int n = _intervalHistogram[i];
if (n > 0) tty->print_cr("%3d: %5d (%4.1f%%)", i, n, 100.0 * n / _ticks);
}
}
}
#endif
// Called by the WatcherThread each tick: gives every enrolled task a
// chance to run via execute_if_pending(delay_time).
void PeriodicTask::real_time_tick(int delay_time) {
#ifndef PRODUCT
if (ProfilerCheckIntervals) {
// Record the observed interval between ticks in the histogram.
_ticks++;
_timer.stop();
int ms = (int)(_timer.seconds() * 1000.0);
_timer.reset();
_timer.start();
if (ms >= PeriodicTask::max_interval) ms = PeriodicTask::max_interval - 1;
_intervalHistogram[ms]++;
}
#endif
{
MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
int orig_num_tasks = _num_tasks;
for(int index = 0; index < _num_tasks; index++) {
_tasks[index]->execute_if_pending(delay_time);
if (_num_tasks < orig_num_tasks) { // task dis-enrolled itself
index--; // re-do current slot as it has changed
orig_num_tasks = _num_tasks;
}
}
}
}
// Returns the shortest time (ms) until any enrolled task becomes due, or
// 0 when no task is enrolled (caller sleeps until enrollment or shutdown).
int PeriodicTask::time_to_wait() {
  // Skip locking if the caller already holds PeriodicTask_lock.
  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
                   NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
  if (_num_tasks == 0) {
    return 0; // sleep until shutdown or a task is enrolled
  }
  // Fold the minimum over every enrolled task's remaining interval.
  int shortest = _tasks[0]->time_to_next_interval();
  for (int i = 1; i < _num_tasks; i++) {
    const int candidate = _tasks[i]->time_to_next_interval();
    if (candidate < shortest) {
      shortest = candidate;
    }
  }
  return shortest;
}
// Constructs a task firing every 'interval_time' ms; the interval must be
// at least min_interval and a multiple of interval_gran.
PeriodicTask::PeriodicTask(size_t interval_time) :
_counter(0), _interval((int) interval_time) {
assert(_interval >= PeriodicTask::min_interval &&
_interval % PeriodicTask::interval_gran == 0,
"improper PeriodicTask interval time");
}
// Ensures a destroyed task is no longer referenced by the task table.
PeriodicTask::~PeriodicTask() {
disenroll();
}
// Adds this task to the static task table and ensures the WatcherThread
// is running and aware of the (possibly shorter) wait interval.
void PeriodicTask::enroll() {
// Avoid self-deadlock if the caller already holds PeriodicTask_lock.
MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
NULL : PeriodicTask_lock);
if (_num_tasks == PeriodicTask::max_tasks) {
fatal("Overflow in PeriodicTask table");
}
_tasks[_num_tasks++] = this;
WatcherThread* thread = WatcherThread::watcher_thread();
if (thread) {
thread->unpark();   // wake it so the new interval takes effect
} else {
WatcherThread::start();   // first task: start the watcher thread
}
}
// Removes this task from the static task table; a no-op if the task is
// not currently enrolled.
void PeriodicTask::disenroll() {
  // Skip locking if the caller already holds PeriodicTask_lock.
  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
                   NULL : PeriodicTask_lock);
  // Locate this task in the table.
  int pos = 0;
  while (pos < _num_tasks && _tasks[pos] != this) {
    pos++;
  }
  if (pos == _num_tasks) {
    return;   // not enrolled
  }
  // Close the gap by shifting the remaining entries left by one.
  _num_tasks--;
  while (pos < _num_tasks) {
    _tasks[pos] = _tasks[pos + 1];
    pos++;
  }
}
C:\hotspot-69087d08d473\src\share\vm/runtime/task.hpp
#ifndef SHARE_VM_RUNTIME_TASK_HPP
#define SHARE_VM_RUNTIME_TASK_HPP
#include "utilities/top.hpp"
// A task executed by the WatcherThread on a fixed millisecond interval.
// Subclasses implement task(); enroll()/disenroll() manage membership in
// a small static table protected by PeriodicTask_lock.
class PeriodicTask: public CHeapObj<mtInternal> {
public:
enum { max_tasks = 10, // Max number of periodic tasks in system
interval_gran = 10,    // interval must be a multiple of this (ms)
min_interval = 10,     // smallest legal interval (ms)
max_interval = 10000 };
static int num_tasks() { return _num_tasks; }
private:
int _counter;          // ms accumulated toward the next firing
const int _interval;   // firing period in ms
static int _num_tasks;
static PeriodicTask* _tasks[PeriodicTask::max_tasks];
static void real_time_tick(int delay_time);
#ifndef PRODUCT
static elapsedTimer _timer; // measures time between ticks
static int _ticks; // total number of ticks
static int _intervalHistogram[max_interval]; // to check spacing of timer interrupts
public:
static void print_intervals();
#endif
friend class WatcherThread;
public:
PeriodicTask(size_t interval_time); // interval is in milliseconds of elapsed time
~PeriodicTask();
void enroll();
void disenroll();
// Runs task() if at least _interval ms have accumulated; 64-bit
// arithmetic guards the sum against int overflow.
void execute_if_pending(int delay_time) {
jlong tmp = (jlong) _counter + (jlong) delay_time;
if (tmp >= (jlong) _interval) {
_counter = 0;
task();
} else {
_counter += delay_time;
}
}
// Milliseconds remaining until this task is next due.
int time_to_next_interval() const {
assert(_interval > _counter, "task counter greater than interval?");
return _interval - _counter;
}
static int time_to_wait();
// The actual work; implemented by subclasses.
virtual void task() = 0;
};
#endif // SHARE_VM_RUNTIME_TASK_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/thread.cpp
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/privilegedStack.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniPeriodicChecker.hpp"
#include "runtime/memprofiler.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/attachListener.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/idealGraphPrinter.hpp"
#endif
#if INCLUDE_RTM_OPT
#include "runtime/rtmLocking.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
// Mute gcc printf-format warnings for this file (some HotSpot formats cannot
// be statically verified by the compiler).
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// DTrace probe support. DTRACE_THREAD_PROBE(probe, javathread) fires the
// hotspot thread__start / thread__stop probe, passing the thread's name and
// length, its java.lang.Thread id, its OS thread id, and its daemon flag.
// Expands to nothing when DTrace support is disabled. The USDT2 variant uses
// the generated HOTSPOT_THREAD_* macros instead of HS_DTRACE_PROBE5.
#ifdef DTRACE_ENABLED

#ifndef USDT2

HS_DTRACE_PROBE_DECL(hotspot, vm__init__begin);
HS_DTRACE_PROBE_DECL(hotspot, vm__init__end);
HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t,
intptr_t, intptr_t, bool);
HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
intptr_t, intptr_t, bool);

#define DTRACE_THREAD_PROBE(probe, javathread) \
{ \
ResourceMark rm(this); \
int len = 0; \
const char* name = (javathread)->get_thread_name(); \
len = strlen(name); \
HS_DTRACE_PROBE5(hotspot, thread__##probe, \
name, len, \
java_lang_Thread::thread_id((javathread)->threadObj()), \
(javathread)->osthread()->thread_id(), \
java_lang_Thread::is_daemon((javathread)->threadObj())); \
}

#else /* USDT2 */

#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_START
#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP

#define DTRACE_THREAD_PROBE(probe, javathread) \
{ \
ResourceMark rm(this); \
int len = 0; \
const char* name = (javathread)->get_thread_name(); \
len = strlen(name); \
HOTSPOT_THREAD_PROBE_##probe( /* probe = start, stop */ \
(char *) name, len, \
java_lang_Thread::thread_id((javathread)->threadObj()), \
(uintptr_t) (javathread)->osthread()->thread_id(), \
java_lang_Thread::is_daemon((javathread)->threadObj())); \
}

#endif /* USDT2 */

#else // ndef DTRACE_ENABLED

// No-op when DTrace is not compiled in.
#define DTRACE_THREAD_PROBE(probe, javathread)

#endif // ndef DTRACE_ENABLED
// Allocate raw storage for a Thread object. With biased locking enabled the
// Thread must be aligned to markOopDesc::biased_lock_alignment, so we
// over-allocate, hand back an aligned pointer, and remember the real malloc
// address in _real_malloc_address for operator delete.
void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
  if (UseBiasedLocking) {
    const int alignment = markOopDesc::biased_lock_alignment;
    // Worst-case slop needed to reach the next aligned address.
    size_t aligned_size = size + (alignment - sizeof(intptr_t));
    // throw_excpt selects between throwing and NULL-returning allocation.
    void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
                                        : AllocateHeap(aligned_size, flags, CURRENT_PC,
                                                       AllocFailStrategy::RETURN_NULL);
    void* aligned_addr = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
    assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
           ((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
           "JavaThread alignment code overflowed allocated storage");
    if (TraceBiasedLocking) {
      if (aligned_addr != real_malloc_addr)
        tty->print_cr("Aligned thread " INTPTR_FORMAT " to " INTPTR_FORMAT,
                      real_malloc_addr, aligned_addr);
    }
    // Stash the original malloc address so operator delete can free it.
    ((Thread*) aligned_addr)->_real_malloc_address = real_malloc_addr;
    return aligned_addr;
  } else {
    return throw_excpt? AllocateHeap(size, flags, CURRENT_PC)
                      : AllocateHeap(size, flags, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  }
}
void Thread::operator delete(void* p) {
if (UseBiasedLocking) {
void* real_malloc_addr = ((Thread*) p)->_real_malloc_address;
FreeHeap(real_malloc_addr, mtThread);
} else {
FreeHeap(p, mtThread);
}
}
// Base Thread constructor. Initializes stack bookkeeping, allocation arenas,
// handle machinery, monitor/park state, and suspend/resume support. Stack
// base/size are recorded later by record_stack_base_and_size() on the thread
// itself.
Thread::Thread() {
  // Stack and CPU-locality fields; filled in once the thread runs.
  set_stack_base(NULL);
  set_stack_size(0);
  set_self_raw_id(0);
  set_lgrp_id(-1);
  set_osthread(NULL);

  // Allocation arenas and handle bookkeeping.
  set_resource_area(new (mtThread)ResourceArea());
  DEBUG_ONLY(_current_resource_mark = NULL;)
  set_handle_area(new (mtThread) HandleArea(NULL));
  set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(30, true));
  set_active_handles(NULL);
  set_free_handle_block(NULL);
  set_last_handle_mark(NULL);
  _oops_do_parity = 0;
  _metadata_on_stack_buffer = NULL;

  // Root HandleMark; ~Thread() checks it is the last one released
  // (via last_handle_mark()).
  new HandleMark(this);

  debug_only(_owned_locks = NULL;)
  debug_only(_allow_allocation_count = 0;)
  NOT_PRODUCT(_allow_safepoint_count = 0;)
  NOT_PRODUCT(_skip_gcalot = false;)
  _jvmti_env_iteration_count = 0;
  set_allocated_bytes(0);
  _vm_operation_started_count = 0;
  _vm_operation_completed_count = 0;
  _current_pending_monitor = NULL;
  _current_pending_monitor_is_from_java = true;
  _current_waiting_monitor = NULL;
  _num_nested_signal = 0;

  // Per-thread ObjectMonitor free/in-use lists.
  omFreeList = NULL ;
  omFreeCount = 0 ;
  omFreeProvision = 32 ;
  omInUseList = NULL ;
  omInUseCount = 0 ;

#ifdef ASSERT
  _visited_for_critical_count = false;
#endif

  // Suspend/resume support.
  _SR_lock = new Monitor(Mutex::suspend_resume, "SR_lock", true);
  _suspend_flags = 0;

  // Seed the per-thread hash-code generator (must not be zero).
  _hashStateX = os::random() ;
  _hashStateY = 842502087 ;
  _hashStateZ = 0x8767 ; // (int)(3579807591LL & 0xffff) ;
  _hashStateW = 273326509 ;

  _OnTrap = 0 ;
  _schedctl = NULL ;
  _Stalled = 0 ;
  _TypeTag = 0x2BAD ;

  // ParkEvents used for synchronization primitives.
  _ParkEvent = ParkEvent::Allocate (this) ;
  _SleepEvent = ParkEvent::Allocate (this) ;
  _MutexEvent = ParkEvent::Allocate (this) ;
  _MuxEvent = ParkEvent::Allocate (this) ;

#ifdef CHECK_UNHANDLED_OOPS
  if (CheckUnhandledOops) {
    _unhandled_oops = new UnhandledOops(this);
  }
#endif // CHECK_UNHANDLED_OOPS
#ifdef ASSERT
  // Verify the alignment Thread::allocate() promised for biased locking.
  if (UseBiasedLocking) {
    assert((((uintptr_t) this) & (markOopDesc::biased_lock_alignment - 1)) == 0, "forced alignment of thread object failed");
    assert(this == _real_malloc_address ||
           this == (void*) align_size_up((intptr_t) _real_malloc_address, markOopDesc::biased_lock_alignment),
           "bug in forced alignment of thread objects");
  }
#endif /* ASSERT */
}
// Publish this Thread in thread-local storage so Thread::current() works.
// This cannot happen in the constructor: threads are usually constructed by
// a parent thread, while TLS must be bound on the new thread itself.
void Thread::initialize_thread_local_storage() {
  ThreadLocalStorage::set_thread(this);
}
// Record this thread's stack bounds (must run on the thread itself), derive
// the Java stack-overflow limit, run per-thread OS initialization, and
// register the stack with Native Memory Tracking when enabled.
void Thread::record_stack_base_and_size() {
  set_stack_base(os::current_stack_base());
  set_stack_size(os::current_stack_size());
  if (is_Java_thread()) {
    // Depends on the base/size just recorded.
    ((JavaThread*) this)->set_stack_overflow_limit();
  }
  // NOTE(review): ordering looks deliberate — record the stack first, then
  // let the OS layer initialize; confirm os::initialize_thread() reads the
  // recorded values before reordering.
  os::initialize_thread(this);
#if INCLUDE_NMT
  // The stack grows downward: its low address is base minus size.
  address stack_low_addr = stack_base() - stack_size();
  MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}
// Base Thread destructor: flush per-thread monitors, unregister the stack
// from NMT, release arenas, handles, park events, and TLS bindings.
Thread::~Thread() {
  // Return this thread's ObjectMonitor free/in-use lists to the global pool.
  ObjectSynchronizer::omFlush (this) ;
#if INCLUDE_NMT
  // Tell NMT the stack is gone — but only if it was ever recorded.
  if (_stack_base != NULL) {
    address low_stack_addr = stack_base() - stack_size();
    MemTracker::release_thread_stack(low_stack_addr, stack_size());
#ifdef ASSERT
    set_stack_base(NULL);
#endif
  }
#endif // INCLUDE_NMT
  delete resource_area();
  // The root HandleMark uses the handle area, so it must be torn down before
  // the handle area itself (deleted further below).
  assert(last_handle_mark() != NULL, "check we have an element");
  delete last_handle_mark();
  assert(last_handle_mark() == NULL, "check we have reached the end");
  // Recycle the park events allocated in the constructor.
  ParkEvent::Release (_ParkEvent) ; _ParkEvent = NULL ;
  ParkEvent::Release (_SleepEvent) ; _SleepEvent = NULL ;
  ParkEvent::Release (_MutexEvent) ; _MutexEvent = NULL ;
  ParkEvent::Release (_MuxEvent) ; _MuxEvent = NULL ;
  delete handle_area();
  delete metadata_handles();
  // osthread may be NULL if OS thread creation failed.
  if (osthread() != NULL) os::free_thread(osthread());
  delete _SR_lock;
  // Clear cached Thread::current() state: our own TLS slot if we are
  // destroying ourselves, otherwise invalidate all cached entries.
  if (this == Thread::current()) {
    ThreadLocalStorage::set_thread(NULL);
  } else {
    ThreadLocalStorage::invalidate_all();
  }
  CHECK_UNHANDLED_OOPS_ONLY(if (CheckUnhandledOops) delete unhandled_oops();)
}
// Thread is never run directly; concrete subclasses must override run().
void Thread::run() {
  ShouldNotReachHere();
}
#ifdef ASSERT
// Debug-only sanity check: a JavaThread pointer may only be used by the
// thread itself or while holding the Threads_lock, since the thread could
// otherwise exit and be freed underneath us.
void check_for_dangling_thread_pointer(Thread *thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");
}
#endif
#ifndef PRODUCT
// Non-product tracing of thread events, gated on -XX:+TraceThreadEvents.
// Prints the event message, thread pointer, OS thread id, name and priority.
void Thread::trace(const char* msg, const Thread* const thread) {
  if (!TraceThreadEvents) return;
  ResourceMark rm;
  ThreadCritical tc;
  const char *name = "non-Java thread";
  int prio = -1;
  if (thread->is_Java_thread()
      && !thread->is_Compiler_thread()) {
    // get_thread_name()/threadObj() require the Threads_lock; take it only
    // if the caller does not already hold it.
    bool release_Threads_lock = false;
    if (!Threads_lock->owned_by_self()) {
      Threads_lock->lock();
      release_Threads_lock = true;
    }
    JavaThread* jt = (JavaThread *)thread;
    name = (char *)jt->get_thread_name();
    oop thread_oop = jt->threadObj();
    // threadObj can still be NULL early in thread setup.
    if (thread_oop != NULL) {
      prio = java_lang_Thread::priority(thread_oop);
    }
    if (release_Threads_lock) {
      Threads_lock->unlock();
    }
  }
  tty->print_cr("Thread::%s " INTPTR_FORMAT " [%lx] %s (prio: %d)", msg, thread, thread->osthread()->thread_id(), name, prio);
}
#endif
// Query the OS-level priority of the given thread, mapped back to a
// ThreadPriority. Errors from os::get_priority are ignored; the assert
// catches out-of-range results in debug builds.
ThreadPriority Thread::get_priority(const Thread* const thread) {
  trace("get priority", thread);
  ThreadPriority priority;
  (void)os::get_priority(thread, priority);
  assert(MinPriority <= priority && priority <= MaxPriority, "non-Java priority found");
  return priority;
}
// Set the OS-level priority of the given thread; the result of the OS call
// is deliberately ignored.
void Thread::set_priority(Thread* thread, ThreadPriority priority) {
  trace("set priority", thread);
  // Debug builds verify the pointer cannot be dangling (see helper above).
  debug_only(check_for_dangling_thread_pointer(thread);)
  (void)os::set_priority(thread, priority);
}
// Start the OS thread backing the given Thread.
void Thread::start(Thread* thread) {
  trace("start", thread);
  if (DisableStartThread) {
    // Testing flag: threads are created but never actually started.
    return;
  }
  if (thread->is_Java_thread()) {
    // Publish RUNNABLE before the native thread runs, so the Java-visible
    // state is never observed as "NEW" once start has been called.
    java_lang_Thread::set_thread_status(((JavaThread*)thread)->threadObj(),
                                        java_lang_Thread::RUNNABLE);
  }
  os::start_thread(thread);
}
// Deliver an asynchronous exception (Thread.stop) to java_thread by handing
// the work to the VM thread as a VM operation.
void Thread::send_async_exception(oop java_thread, oop java_throwable) {
  VM_ThreadStop* stop_op = new VM_ThreadStop(java_thread, java_throwable);
  VMThread::execute(stop_op);
}
#define DEBUG_FALSE_BITS (0x00000010 | 0x00200000)
// Scoped helper for debugging external-suspend failures: constructed at the
// top of is_ext_suspend_completed()/wait_for_ext_suspend_completion(), it
// inspects the debug bits on scope exit and, when enabled via
// AssertOnSuspendWaitFailure/TraceSuspendWaitFailures, reports (or asserts
// on) a failed suspend wait.
class TraceSuspendDebugBits : public StackObj {
 private:
  JavaThread * jt;
  bool is_wait;          // constructed by wait_for_ext_suspend_completion()?
  bool called_by_wait;   // meaningful when !is_wait
  uint32_t * bits;       // caller's debug-bit accumulator

 public:
  TraceSuspendDebugBits(JavaThread *_jt, bool _is_wait, bool _called_by_wait,
                        uint32_t *_bits) {
    jt = _jt;
    is_wait = _is_wait;
    called_by_wait = _called_by_wait;
    bits = _bits;
  }

  ~TraceSuspendDebugBits() {
    if (!is_wait) {
#if 1
      // Currently only report failures observed by the wait variant;
      // the #else branch would also report is_ext_suspend_completed()
      // calls made on behalf of wait_for_ext_suspend_completion().
      return;
#else
      if (!called_by_wait) {
        return;
      }
#endif
    }

    if (AssertOnSuspendWaitFailure || TraceSuspendWaitFailures) {
      // DEBUG_FALSE_BITS marks the "returned false" paths of interest.
      if (bits != NULL && (*bits & DEBUG_FALSE_BITS) != 0) {
        MutexLocker ml(Threads_lock); // needed for get_thread_name()
        ResourceMark rm;
        tty->print_cr(
            "Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
            jt->get_thread_name(), *bits);
        guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
      }
    }
  }
};
#undef DEBUG_FALSE_BITS
// Determine whether an external suspend request on this thread has completed,
// i.e. the thread is stopped or in a state equivalent to being suspended.
// called_by_wait is true when invoked from wait_for_ext_suspend_completion(),
// which suppresses the internal retry loop (the caller has its own).
bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits) {
  TraceSuspendDebugBits tsdb(this, false /* !is_wait */, called_by_wait, bits);
  bool did_trans_retry = false; // only do thread_in_native_trans retry once
  bool do_trans_retry; // flag to force the retry
  do {
    do_trans_retry = false;
    if (is_exiting()) {
      // An exiting thread can never be suspended.
      return false;
    }
    if (!is_external_suspend()) {
      // The suspend request was cancelled.
      return false;
    }
    if (is_ext_suspended()) {
      // Suspension has fully completed.
      return true;
    }
    JavaThreadState save_state = thread_state();
    if (save_state == _thread_blocked && is_suspend_equivalent()) {
      // Blocked with a suspend-equivalent condition: treated as suspended.
      return true;
    } else if (save_state == _thread_in_native && frame_anchor()->walkable()) {
      // In native with a walkable stack: equivalent to being suspended.
      return true;
    } else if (!called_by_wait && !did_trans_retry &&
               save_state == _thread_in_native_trans &&
               frame_anchor()->walkable()) {
      // Mid-transition out of native: give the thread a short, bounded
      // chance (SuspendRetryCount waits with growing delay) to reach a
      // recognizable state before concluding "not suspended".
      did_trans_retry = true;
      for (int i = 1; i <= SuspendRetryCount; i++) {
        // Non-JavaThread callers must not do safepoint checks while waiting.
        SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
        if (thread_state() != _thread_in_native_trans) {
          // State changed; re-run the full evaluation from the top.
          do_trans_retry = true;
          break;
        }
      } // end retry loop
    }
  } while (do_trans_retry);
  // None of the suspended/suspend-equivalent conditions held.
  return false;
}
// Wait (with up to `retries` growing-delay waits) for an external suspend
// request on this thread to complete. Returns true once the thread is
// suspended (or suspend-equivalent); returns false if the request is
// cancelled or the retries are exhausted. `bits` accumulates debug state
// reported by TraceSuspendDebugBits on scope exit.
//
// Fix: removed the local `reset_bits`, which was assigned from *bits but
// never read afterwards (dead store with no side effects).
bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
                                                 uint32_t *bits) {
  TraceSuspendDebugBits tsdb(this, true /* is_wait */,
                             false /* !called_by_wait */, bits);
  bool is_suspended;
  bool pending;
  {
    // First probe under SR_lock, without safepoint checks.
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
                                            delay, bits);
    pending = is_external_suspend();
  }
  if (!pending) {
    // Request was cancelled before (or while) we checked.
    return false;
  }
  if (is_suspended) {
    return true;
  }
  // Retry with an increasing wait (i * delay ms) each round.
  for (int i = 1; i <= retries; i++) {
    {
      MutexLocker ml(SR_lock());
      SR_lock()->wait(!Thread::current()->is_Java_thread(), i * delay);
      is_suspended = is_ext_suspend_completed(true /* called_by_wait */,
                                              delay, bits);
      pending = is_external_suspend();
    }
    if (!pending) {
      return false;
    }
    if (is_suspended) {
      return true;
    }
  } // end retry loop
  // Retries exhausted without observing a completed suspend.
  return false;
}
#ifndef PRODUCT
// Record one jump in the debug ring buffer (non-product only).
void JavaThread::record_jump(address target, address instr, const char* file, int line) {
  // Claim the next slot, wrapping with the power-of-two size mask.
  int slot = _jmp_ring_index;
  _jmp_ring_index = (slot + 1) & (jump_ring_buffer_size - 1);
  _jmp_ring[slot]._target      = (intptr_t) target;
  _jmp_ring[slot]._instruction = (intptr_t) instr;
  _jmp_ring[slot]._file        = file;
  _jmp_ring[slot]._line        = line;
}
#endif /* PRODUCT */
// A last Java frame is usable by the profiler only when one exists and the
// frame anchor is walkable.
bool JavaThread::profile_last_Java_frame(frame* _fr) {
  return has_last_Java_frame() && _anchor.walkable();
}
// Interrupt the given thread via the OS layer.
void Thread::interrupt(Thread* thread) {
  trace("interrupt", thread);
  // Debug builds verify the pointer cannot be dangling.
  debug_only(check_for_dangling_thread_pointer(thread);)
  os::interrupt(thread);
}
// Query (and optionally clear) the interrupt state of the given thread.
bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
  trace("is_interrupted", thread);
  // Debug builds verify the pointer cannot be dangling.
  debug_only(check_for_dangling_thread_pointer(thread);)
  return os::is_interrupted(thread, clear_interrupted);
}
// Try to claim this thread for a parallel strong-roots iteration by CASing
// _oops_do_parity to the current parity. Exactly one worker per parity value
// gets true; everyone else (including repeat calls) gets false.
bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
  jint thread_parity = _oops_do_parity;
  if (thread_parity != strong_roots_parity) {
    jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
    if (res == thread_parity) {
      // We won the CAS: this worker claims the thread.
      return true;
    } else {
      // Lost the race; whoever won must have installed the current parity.
      guarantee(res == strong_roots_parity, "Or else what?");
      assert(SharedHeap::heap()->workers()->active_workers() > 0,
             "Should only fail when parallel.");
      return false;
    }
  }
  // Already at the current parity: claimed earlier in this iteration.
  assert(SharedHeap::heap()->workers()->active_workers() > 0,
         "Should only fail when parallel.");
  return false;
}
// Apply the oop closure to the GC roots held by a plain Thread: active JNI
// handles, the pending exception, and the handle area. (cld_f and cf are
// unused at this level.)
void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
  JNIHandleBlock* jni_handles = active_handles();
  if (jni_handles != NULL) {
    jni_handles->oops_do(f);
  }
  f->do_oop((oop*)&_pending_exception);
  handle_area()->oops_do(f);
}
// Walk the nmethods this thread keeps alive; a plain Thread has none, so
// this is a no-op at the base level.
void Thread::nmethods_do(CodeBlobClosure* cf) {
}
void Thread::metadata_do(void f(Metadata*)) {
if (metadata_handles() != NULL) {
for (int i = 0; i< metadata_handles()->length(); i++) {
f(metadata_handles()->at(i));
}
}
}
// Print a one-line summary of this thread: native priority, tid, extension
// state and OS-thread details. Only meaningful once an osthread exists.
void Thread::print_on(outputStream* st) const {
  if (osthread() != NULL) {
    int native_prio;
    if (os::get_native_priority(this, &native_prio) == OS_OK) {
      st->print("os_prio=%d ", native_prio);
    }
    st->print("tid=" INTPTR_FORMAT " ", this);
    ext().print_on(st);
    osthread()->print_on(st);
  }
  // In wizard mode also dump the VM-internal locks this thread owns.
  debug_only(if (WizardMode) print_owned_locks_on(st);)
}
// Crash-time printing: identify the kind of thread, its stack range, and its
// OS thread id. Must avoid allocation — used from error reporting.
void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
  const char* kind;
  if      (is_VM_thread())           kind = "VMThread";
  else if (is_Compiler_thread())     kind = "CompilerThread";
  else if (is_Java_thread())         kind = "JavaThread";
  else if (is_GC_task_thread())      kind = "GCTaskThread";
  else if (is_Watcher_thread())      kind = "WatcherThread";
  else if (is_ConcurrentGC_thread()) kind = "ConcurrentGCThread";
  else                               kind = "Thread";
  st->print("%s", kind);

  st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
            _stack_base - _stack_size, _stack_base);

  if (osthread()) {
    st->print(" [id=%d]", osthread()->thread_id());
  }
}
#ifdef ASSERT
// Debug-only: list the VM-internal Mutex/Monitor locks this thread owns.
void Thread::print_owned_locks_on(outputStream* st) const {
  Monitor* lock = _owned_locks;
  if (lock == NULL) {
    st->print(" (no locks) ");
    return;
  }
  st->print_cr(" Locks owned:");
  for (; lock != NULL; lock = lock->next()) {
    lock->print_on(st);
  }
}
// NOTE(review): appears unreferenced within this part of the file — possibly
// leftover debugging state; confirm against the rest of thread.cpp before removing.
static int ref_use_count = 0;
bool Thread::owns_locks_but_compiled_lock() const {
for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
if (cur != Compile_lock) return true;
}
return false;
}
#endif
#ifndef PRODUCT
// Non-product check that the current thread may legally reach a safepoint
// (or start a VM operation, when potential_vm_operation is true): no
// no-safepoint scope is active, the thread is in the right state, and it
// holds no lock the VM could block on at a safepoint.
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
  // A "no safepoint" scope (e.g. No_Safepoint_Verifier) must not be active.
  if (!(_allow_safepoint_count == 0))
    fatal("Possible safepoint reached by thread that does not allow it");
  // Java threads may only block in _thread_in_vm; anything else indicates a
  // leaf call taking a blocking lock.
  if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
    fatal("LEAF method calling lock?");
  }
#ifdef ASSERT
  if (potential_vm_operation && is_Java_thread()
      && !Universe::is_bootstrapping()) {
    // Starting a VM operation while holding a lock the VM can block on
    // would deadlock the safepoint protocol; the listed locks are exempt.
    for(Monitor *cur = _owned_locks; cur; cur = cur->next()) {
      if ( (cur->allow_vm_block() &&
            cur != Threads_lock &&
            cur != Compile_lock && // Temporary: should not be necessary when we get spearate compilation
            cur != VMOperationRequest_lock &&
            cur != VMOperationQueue_lock) ||
            cur->rank() == Mutex::special) {
        fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
      }
    }
  }
  if (GCALotAtAllSafepoints) {
    // Stress-test option: force a GC at every possible safepoint.
    InterfaceSupport::check_gc_alot();
  }
#endif
}
#endif
// True if adr lies within this thread's live stack (between the current
// stack pointer and the stack base). Permissive when the base is unknown.
bool Thread::is_in_stack(address adr) const {
  assert(Thread::current() == this, "is_in_stack can only be called from current thread");
  address end = os::current_stack_pointer();
  if (_stack_base == NULL) {
    // Stack not yet recorded: cannot bound-check, so err on the safe side.
    return true;
  }
  return stack_base() > adr && adr >= end;
}
// True if adr lies within the usable portion of this thread's stack, i.e.
// excluding the yellow/red guard pages at the low end.
bool Thread::is_in_usable_stack(address adr) const {
  size_t guard_bytes = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
  address low_limit = stack_base() - (_stack_size - guard_bytes);
  return adr < stack_base() && adr >= low_limit;
}
// A lock is "owned" by this thread if the lock record address lies on its
// local stack.
bool Thread::is_lock_owned(address adr) const {
  return on_local_stack(adr);
}
// Bind this thread as the process's main/primordial thread at the OS level.
bool Thread::set_as_starting_thread() {
  return os::create_main_thread((JavaThread*)this);
}
// Resolve class_name (failing with an exception if absent) and run its
// static initialization.
static void initialize_class(Symbol* class_name, TRAPS) {
  Klass* resolved = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
  InstanceKlass::cast(resolved)->initialize(CHECK);
}
// Build the initial java.lang.ThreadGroup hierarchy during VM startup: the
// "system" group (no-arg constructor, published via Universe) and the "main"
// group parented under it. Returns the "main" group.
static Handle create_initial_thread_group(TRAPS) {
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ThreadGroup(), true, CHECK_NH);
  instanceKlassHandle klass (THREAD, k);

  // The system group: ThreadGroup's no-arg constructor.
  Handle system_instance = klass->allocate_instance_handle(CHECK_NH);
  {
    JavaValue result(T_VOID);
    JavaCalls::call_special(&result,
                            system_instance,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::void_method_signature(),
                            CHECK_NH);
  }
  Universe::set_system_thread_group(system_instance());

  // The main group: ThreadGroup(ThreadGroup parent, String name)
  // with parent = system group and name = "main".
  Handle main_instance = klass->allocate_instance_handle(CHECK_NH);
  {
    JavaValue result(T_VOID);
    Handle string = java_lang_String::create_from_str("main", CHECK_NH);
    JavaCalls::call_special(&result,
                            main_instance,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_string_void_signature(),
                            system_instance,
                            string,
                            CHECK_NH);
  }
  return main_instance;
}
// Create the java.lang.Thread object for the primordial "main" thread, link
// it to the given JavaThread, and run the
// Thread(ThreadGroup, String) constructor with name "main".
static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS) {
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_NULL);
  instanceKlassHandle klass (THREAD, k);
  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_NULL);

  // Wire the Java object and the VM thread together before running <init>.
  java_lang_Thread::set_thread(thread_oop(), thread);
  java_lang_Thread::set_priority(thread_oop(), NormPriority);
  thread->set_threadObj(thread_oop());

  Handle string = java_lang_String::create_from_str("main", CHECK_NULL);

  JavaValue result(T_VOID);
  JavaCalls::call_special(&result, thread_oop,
                          klass,
                          vmSymbols::object_initializer_name(),
                          vmSymbols::threadgroup_string_void_signature(),
                          thread_group,
                          string,
                          CHECK_NULL);
  return thread_oop();
}
// Invoke java.lang.System.initializeSystemClass() during VM startup.
static void call_initializeSystemClass(TRAPS) {
  Klass* system_klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  instanceKlassHandle klass (THREAD, system_klass);

  JavaValue result(T_VOID);
  JavaCalls::call_static(&result, klass, vmSymbols::initializeSystemClass_name(),
                         vmSymbols::void_method_signature(), CHECK);
}
// Buffers backing the cached runtime name/version strings, filled in by
// get_java_runtime_name()/get_java_runtime_version() below.
char java_runtime_name[128] = "";
char java_runtime_version[128] = "";
// Extract the java.runtime.name value from sun.misc.Version, caching it in
// the file-scope java_runtime_name buffer. Returns NULL when the class,
// field, or value is unavailable.
static const char* get_java_runtime_name(TRAPS) {
  Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
  fieldDescriptor fd;
  if (k == NULL ||
      !InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(),
                                                vmSymbols::string_signature(), &fd)) {
    return NULL;
  }
  // Static String field lives on the class mirror.
  oop name_oop = k->java_mirror()->obj_field(fd.offset());
  if (name_oop == NULL) {
    return NULL;
  }
  return java_lang_String::as_utf8_string(name_oop,
                                          java_runtime_name,
                                          sizeof(java_runtime_name));
}
// Extract the java.runtime.version value from sun.misc.Version, caching it
// in the file-scope java_runtime_version buffer. Returns NULL when the
// class, field, or value is unavailable.
static const char* get_java_runtime_version(TRAPS) {
  Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
                                    Handle(), Handle(), CHECK_AND_CLEAR_NULL);
  fieldDescriptor fd;
  if (k == NULL ||
      !InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(),
                                                vmSymbols::string_signature(), &fd)) {
    return NULL;
  }
  // Static String field lives on the class mirror.
  oop version_oop = k->java_mirror()->obj_field(fd.offset());
  if (version_oop == NULL) {
    return NULL;
  }
  return java_lang_String::as_utf8_string(version_oop,
                                          java_runtime_version,
                                          sizeof(java_runtime_version));
}
// Run sun.misc.PostVMInitHook.run() if that class exists; silently does
// nothing when the hook class is absent.
static void call_postVMInitHook(TRAPS) {
  Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD);
  instanceKlassHandle klass (THREAD, k);
  if (!klass.not_null()) {
    return; // hook class not present
  }
  JavaValue result(T_VOID);
  JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
                         vmSymbols::void_method_signature(),
                         CHECK);
}
// Refresh the java.vm.info system property with the VM's current info string
// by calling System.setProperty("java.vm.info", ...).
static void reset_vm_info_property(TRAPS) {
  ResourceMark rm(THREAD);
  const char *vm_info = VM_Version::vm_info_string();

  Klass* system_klass = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_System(), true, CHECK);
  instanceKlassHandle klass (THREAD, system_klass);

  Handle key_str   = java_lang_String::create_from_str("java.vm.info", CHECK);
  Handle value_str = java_lang_String::create_from_str(vm_info, CHECK);

  JavaValue result(T_OBJECT); // setProperty returns the previous value; ignored
  JavaCalls::call_static(&result,
                         klass,
                         vmSymbols::setProperty_name(),
                         vmSymbols::string_string_string_signature(),
                         key_str,
                         value_str,
                         CHECK);
}
// Create and initialize the java.lang.Thread object for this JavaThread:
// run the appropriate Thread constructor (named or unnamed), set the daemon
// flag, and register the thread with its ThreadGroup via ThreadGroup.add().
void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS) {
  assert(thread_group.not_null(), "thread group should be specified");
  assert(threadObj() == NULL, "should only create Java thread object once");

  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
  instanceKlassHandle klass (THREAD, k);
  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);

  // Wire the Java object and this JavaThread together before running <init>.
  java_lang_Thread::set_thread(thread_oop(), this);
  java_lang_Thread::set_priority(thread_oop(), NormPriority);
  set_threadObj(thread_oop());

  JavaValue result(T_VOID);
  if (thread_name != NULL) {
    // Thread(ThreadGroup group, String name)
    Handle name = java_lang_String::create_from_str(thread_name, CHECK);
    JavaCalls::call_special(&result,
                            thread_oop,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_string_void_signature(),
                            thread_group, // Argument 1
                            name,         // Argument 2
                            THREAD);
  } else {
    // Thread(ThreadGroup group, Runnable target) with a null target.
    JavaCalls::call_special(&result,
                            thread_oop,
                            klass,
                            vmSymbols::object_initializer_name(),
                            vmSymbols::threadgroup_runnable_void_signature(),
                            thread_group, // Argument 1
                            Handle(),     // Argument 2
                            THREAD);
  }

  if (daemon) {
    java_lang_Thread::set_daemon(thread_oop());
  }

  // Constructor may have thrown (note: called with THREAD, not CHECK).
  if (HAS_PENDING_EXCEPTION) {
    return;
  }

  // Register the new thread with its group: ThreadGroup.add(Thread).
  KlassHandle group(this, SystemDictionary::ThreadGroup_klass());
  Handle threadObj(this, this->threadObj());

  JavaCalls::call_special(&result,
                          thread_group,
                          group,
                          vmSymbols::add_method_name(),
                          vmSymbols::thread_void_signature(),
                          threadObj, // Arg 1
                          THREAD);
}
// NamedThread: a Thread with a lazily-set, C-heap-allocated name.
NamedThread::NamedThread() : Thread() {
  _name = NULL;             // set once via set_name()
  _processed_thread = NULL;
}
// Destructor: report thread exit to JFR (when built in) and free the name
// buffer allocated by set_name().
NamedThread::~NamedThread() {
  JFR_ONLY(Jfr::on_thread_exit(this);)
  if (_name != NULL) {
    FREE_C_HEAP_ARRAY(char, _name, mtThread);
    _name = NULL;
  }
}
// Set the thread's name, printf-style. May only be called once; the result
// is truncated to max_name_len and stored in a C-heap buffer freed by the
// destructor.
void NamedThread::set_name(const char* format, ...) {
  guarantee(_name == NULL, "Only get to set name once.");
  _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
  guarantee(_name != NULL, "alloc failure");
  va_list ap;
  va_start(ap, format);
  jio_vsnprintf(_name, max_name_len, format, ap);
  va_end(ap);
}
// WatcherThread statics: the singleton instance, whether VM startup has
// progressed far enough for the thread to be created (see make_startable()),
// and the shutdown request flag (volatile: written by the stopping thread,
// read in the watcher's main loop).
WatcherThread* WatcherThread::_watcher_thread   = NULL;
bool WatcherThread::_startable                  = false;
volatile bool  WatcherThread::_should_terminate = false;
// Create the singleton WatcherThread at maximum priority and (unless
// DisableStartThread) start it immediately. On OS-thread creation failure
// _watcher_thread simply stays NULL.
WatcherThread::WatcherThread() : Thread() {
  assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
  if (os::create_thread(this, os::watcher_thread)) {
    _watcher_thread = this;
    os::set_priority(this, MaxPriority);
    if (!DisableStartThread) {
      os::start_thread(this);
    }
  }
}
// Sleep until the next periodic task is due, an unpark arrives, or
// termination is requested. Returns the time actually slept, in ms.
int WatcherThread::sleep() const {
  MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);

  // Time until the earliest enrolled task fires; 0 means no tasks are
  // enrolled and we should wait indefinitely (see PeriodicTask::time_to_wait).
  int remaining = PeriodicTask::time_to_wait();
  int time_slept = 0;

  OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);

  jlong time_before_loop = os::javaTimeNanos();

  for (;;) {
    // Expected to time out; we are only notified on enroll/terminate.
    bool timedout = PeriodicTask_lock->wait(Mutex::_no_safepoint_check_flag, remaining);
    jlong now = os::javaTimeNanos();

    if (remaining == 0) {
      // We waited untimed (no tasks); restart the sleep accounting now.
      time_slept = 0;
      time_before_loop = now;
    } else {
      time_slept = (int) ((now - time_before_loop) / 1000000);
    }

    if (timedout || _should_terminate) {
      break;
    }

    // We were notified (e.g. a task was (dis)enrolled); recompute deadline.
    remaining = PeriodicTask::time_to_wait();
    if (remaining == 0) {
      // All tasks gone; go back to an untimed wait.
      continue;
    }

    // Account for the time already slept this round.
    remaining -= time_slept;
    if (remaining <= 0)
      break;
  }

  return time_slept;
}
// Main loop of the WatcherThread: repeatedly sleep until a periodic task is
// due and tick all tasks; also acts as a watchdog during fatal error
// reporting, forcing a VM abort if reporting appears to hang.
void WatcherThread::run() {
  assert(this == watcher_thread(), "just checking");

  this->record_stack_base_and_size();
  this->initialize_thread_local_storage();
  this->set_native_thread_name(this->name());
  this->set_active_handles(JNIHandleBlock::allocate_block());
  while(!_should_terminate) {
    assert(watcher_thread() == Thread::current(), "thread consistency check");
    assert(watcher_thread() == this, "thread consistency check");

    int time_waited = sleep();

    if (is_error_reported()) {
      // Some other thread is reporting a fatal error. Stay out of the way,
      // but when no interactive/hook options are configured, force an abort
      // if error reporting has not finished within ~2 minutes.
      for (;;) {
        if (!ShowMessageBoxOnError
            && (OnError == NULL || OnError[0] == '\0')
            && Arguments::abort_hook() == NULL) {
          os::sleep(this, 2 * 60 * 1000, false);
          fdStream err(defaultStream::output_fd());
          err.print_raw_cr("# [ timer expired, abort... ]");
          // skip atexit/vm_exit/vm_abort hooks
          os::die();
        }

        // ShowMessageBoxOnError / OnError / abort_hook may take arbitrarily
        // long; re-check every 5 seconds, never leaving this loop.
        os::sleep(this, 5 * 1000, false);
      }
    }

    PeriodicTask::real_time_tick(time_waited);
  }

  // Signal WatcherThread::stop() that we are done.
  {
    MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
    _watcher_thread = NULL;
    Terminator_lock->notify();
  }

  // Thread destructor usually does this, but the watcher is not deleted.
  ThreadLocalStorage::set_thread(NULL);
}
// Start the WatcherThread if it is not already running and the VM is far
// enough along in startup (make_startable() has been called).
void WatcherThread::start() {
  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
  if (watcher_thread() != NULL || !_startable) {
    return;
  }
  _should_terminate = false;
  // The constructor registers itself as _watcher_thread and starts the
  // native thread.
  new WatcherThread();
}
// Called once VM startup has progressed far enough that creating the
// WatcherThread is safe; until then WatcherThread::start() is a no-op.
void WatcherThread::make_startable() {
  assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
  _startable = true;
}
// VM shutdown: request the WatcherThread to terminate and wait until its
// run() method has signalled completion via Terminator_lock.
void WatcherThread::stop() {
  {
    MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
    _should_terminate = true;
    OrderAccess::fence(); // ensure WatcherThread sees update in main loop

    WatcherThread* watcher = watcher_thread();
    if (watcher != NULL)
      // Wake it from sleep() so it notices _should_terminate promptly.
      watcher->unpark();
  }

  MutexLocker mu(Terminator_lock);

  // run() clears _watcher_thread and notifies Terminator_lock when it exits.
  while(watcher_thread() != NULL) {
    Terminator_lock->wait(!Mutex::_no_safepoint_check_flag, 0,
                          Mutex::_as_suspend_equivalent_flag);
  }
}
// Wake the WatcherThread from its sleep() wait so it re-evaluates its state
// (new task enrolled, or termination requested).
void WatcherThread::unpark() {
  // Take PeriodicTask_lock only if the caller does not already hold it.
  MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock,
                   Mutex::_no_safepoint_check_flag);
  PeriodicTask_lock->notify();
}
// Print: quoted thread name followed by the generic Thread summary.
void WatcherThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}
// Common initialization for all JavaThread constructors: clears the frame
// anchor, JNI/VM result slots, deoptimization and exception state, JVMTI
// state, and allocates the per-thread Parker, statistics, and safepoint
// state objects.
void JavaThread::initialize() {
  // GC worker-claim id (UINT_MAX == unclaimed).
  set_claimed_par_id(UINT_MAX);

  set_saved_exception_pc(NULL);
  set_threadObj(NULL);      // java.lang.Thread set later via set_threadObj()
  _anchor.clear();          // no last Java frame yet
  set_entry_point(NULL);
  set_jni_functions(jni_functions());
  set_callee_target(NULL);
  set_vm_result(NULL);
  set_vm_result_2(NULL);

  // Deoptimization state.
  set_vframe_array_head(NULL);
  set_vframe_array_last(NULL);
  set_deferred_locals(NULL);
  set_deopt_mark(NULL);
  set_deopt_nmethod(NULL);
  clear_must_deopt_id();

  set_monitor_chunks(NULL);
  set_next(NULL);
  set_thread_state(_thread_new);
  _terminated = _not_terminated;
  _privileged_stack_top = NULL;
  _array_for_gc = NULL;
  _suspend_equivalent = false;
  _in_deopt_handler = 0;
  _doing_unsafe_access = false;
  _stack_guard_state = stack_guard_unused; // guard pages set up later

  // Pending-exception state used by compiled/runtime code.
  (void)const_cast<oop&>(_exception_oop = oop(NULL));
  _exception_pc  = 0;
  _exception_handler_pc = 0;
  _is_method_handle_return = 0;

  // JVMTI state.
  _jvmti_thread_state= NULL;
  _should_post_on_exceptions_flag = JNI_FALSE;
  _jvmti_get_loaded_classes_closure = NULL;
  _interp_only_mode    = 0;

  _special_runtime_exit_condition = _no_async_condition;
  _pending_async_exception = NULL;
  _thread_stat = NULL;
  _thread_stat = new ThreadStatistics();
  _blocked_on_compilation = false;
  _jni_active_critical = 0;
  _pending_jni_exception_check_fn = NULL;
  _do_not_unlock_if_synchronized = false;
  _cached_monitor_info = NULL;

  // Parker used by java.util.concurrent.locks.LockSupport.
  _parker = Parker::Allocate(this) ;

#ifndef PRODUCT
  // Pre-touch the jump ring buffer used for debug tracing.
  _jmp_ring_index = 0;
  for (int ji = 0 ; ji < jump_ring_buffer_size ; ji++ ) {
    record_jump(NULL, NULL, NULL, 0);
  }
#endif /* PRODUCT */

  // Optional flat profiler support.
  set_thread_profiler(NULL);
  if (FlatProfiler::is_active()) {
    ThreadProfiler* pp = new ThreadProfiler();
    pp->engage();
    set_thread_profiler(pp);
  }

  // Safepoint state plus platform-specific initialization.
  ThreadSafepointState::create(this);

  debug_only(_java_call_counter = 0);

  // JVMTI PopFrame support.
  _popframe_condition = popframe_inactive;
  _popframe_preserved_args = NULL;
  _popframe_preserved_args_size = 0;
  _frames_to_pop_failed_realloc = 0;

  pd_initialize();
}
#if INCLUDE_ALL_GCS
// Global queue sets shared by all JavaThreads' per-thread SATB marking and
// dirty-card queues (initialized in the JavaThread constructors below).
SATBMarkQueueSet JavaThread::_satb_mark_queue_set;
DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
#endif // INCLUDE_ALL_GCS
// Constructor for threads created by the VM or attached via JNI
// (AttachCurrentThread); is_attaching_via_jni distinguishes the two.
JavaThread::JavaThread(bool is_attaching_via_jni) :
  Thread()
#if INCLUDE_ALL_GCS
  , _satb_mark_queue(&_satb_mark_queue_set),
  _dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
  initialize();
  _jni_attach_state = is_attaching_via_jni ? _attaching_via_jni
                                           : _not_attaching_via_jni;
  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}
// Re-enable the yellow guard zone after a stack overflow has been handled.
// Returns true when the stack is (or does not need to be) guarded again.
bool JavaThread::reguard_stack(address cur_sp) {
  if (_stack_guard_state != stack_guard_yellow_disabled) {
    return true; // Stack already guarded or guard pages not needed.
  }

  if (register_stack_overflow()) {
    // NOTE(review): a pending register-stack overflow blocks reguarding —
    // confirm the intended platform semantics (register stacks, e.g. ia64).
    return false;
  }

  // Only re-arm once we have unwound far enough that the guard pages lie
  // below the current stack pointer.
  guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
  enable_stack_yellow_zone();
  return true;
}
// Convenience overload: reguard relative to the caller's current stack pointer.
bool JavaThread::reguard_stack(void) {
return reguard_stack(os::current_stack_pointer());
}
// If the VM has already exited, never let this thread continue: block on the
// Threads_lock (presumably held across VM exit — the acquisition is expected
// never to return, which ShouldNotReachHere() asserts).
void JavaThread::block_if_vm_exited() {
if (_terminated == _vm_exited) {
Threads_lock->lock_without_safepoint_check();
ShouldNotReachHere();
}
}
// Forward declaration so the constructor below can recognize compiler threads
// by comparing their entry point against this function.
static void compiler_thread_entry(JavaThread* thread, TRAPS);
// Construct a JavaThread backed by a brand-new OS thread running
// 'entry_point'.  'stack_sz' is passed through to os::create_thread as the
// requested stack size.
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
Thread()
#if INCLUDE_ALL_GCS
, _satb_mark_queue(&_satb_mark_queue_set),
_dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
  if (TraceThreadEvents) {
    tty->print_cr("creating thread %p", this);
  }
  initialize();
  _jni_attach_state = _not_attaching_via_jni;
  set_entry_point(entry_point);
  // Compiler threads are distinguished solely by their entry point.
  // (Was: an initialization of thr_type that was a dead store, immediately
  // overwritten by the same conditional.)
  os::ThreadType thr_type = (entry_point == &compiler_thread_entry)
                                ? os::compiler_thread
                                : os::java_thread;
  os::create_thread(this, thr_type, stack_sz);
}
// Destructor: release per-thread resources set up in initialize() and by
// deoptimization/JVMTI machinery.
JavaThread::~JavaThread() {
if (TraceThreadEvents) {
tty->print_cr("terminate thread %p", this);
}
// Release the Parker allocated in initialize().
Parker::Release(_parker);
_parker = NULL ;
// Free the deoptimization bookkeeping, if a deopt left any behind.
vframeArray* old_array = vframe_array_last();
if (old_array != NULL) {
Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
old_array->set_unroll_block(NULL);
delete old_info;
delete old_array;
}
// Free any deferred (JVMTI) local-variable updates that were never applied.
GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = deferred_locals();
if (deferred != NULL) {
assert(deferred->length() != 0, "empty array!");
do {
jvmtiDeferredLocalVariableSet* dlv = deferred->at(0);
deferred->remove_at(0);
delete dlv;
} while (deferred->length() != 0);
delete deferred;
}
ThreadSafepointState::destroy(this);
if (_thread_profiler != NULL) delete _thread_profiler;
if (_thread_stat != NULL) delete _thread_stat;
}
// First code executed on the new OS thread.  The initialization order below
// is deliberate (TLS and stack bounds before guard pages, guard pages before
// the state transition); do not reorder casually.
void JavaThread::run() {
this->initialize_tlab();
this->record_base_of_stack_pointer();
this->record_stack_base_and_size();
this->initialize_thread_local_storage();
this->create_stack_guard_pages();
this->cache_global_variables();
// Become visible to the VM: _thread_new -> _thread_in_vm (with fence).
ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
assert(JavaThread::current() == this, "sanity check");
assert(!Thread::current()->owns_locks(), "sanity check");
DTRACE_THREAD_PROBE(start, this);
// JNI handle block must exist before any upcalls that create handles.
this->set_active_handles(JNIHandleBlock::allocate_block());
if (JvmtiExport::should_post_thread_life()) {
JvmtiExport::post_thread_start(this);
}
JFR_ONLY(Jfr::on_thread_start(this);)
thread_main_inner();
}
// Runs the thread's entry point, then performs normal exit processing and
// self-deletes.  The entry point is skipped if an exception is already
// pending or the java.lang.Thread was stopped before it ever ran (stillborn).
void JavaThread::thread_main_inner() {
assert(JavaThread::current() == this, "sanity check");
assert(this->threadObj() != NULL, "just checking");
if (!this->has_pending_exception() &&
!java_lang_Thread::is_stillborn(this->threadObj())) {
{
// Propagate the Java-level thread name to the OS thread.
ResourceMark rm(this);
this->set_native_thread_name(this->get_thread_name());
}
HandleMark hm(this);
this->entry_point()(this, this);
}
DTRACE_THREAD_PROBE(stop, this);
// Normal (non-destroy_vm) exit; 'this' is invalid after delete.
this->exit(false);
delete this;
}
// Wake up anyone joining on this thread: mark the java.lang.Thread object
// TERMINATED, unlink it from the VM thread, and notify waiters on its
// monitor (Thread.join waits there).
static void ensure_join(JavaThread* thread) {
Handle threadObj(thread, thread->threadObj());
assert(threadObj.not_null(), "java thread object must exist");
ObjectLocker lock(threadObj, thread);
// Ignore any pending exception so the bookkeeping below always runs.
thread->clear_pending_exception();
java_lang_Thread::set_thread_status(threadObj(), java_lang_Thread::TERMINATED);
java_lang_Thread::set_thread(threadObj(), NULL);
lock.notify_all(thread);
// Clear again: the notify path may have raised something.
thread->clear_pending_exception();
}
// Tear down a JavaThread at the end of its life.  Runs the Java-level
// uncaught-exception handling and Thread.exit(), posts JVMTI/JFR events,
// waits out any pending external suspend, joins, releases JNI handles and
// stack guard pages, and finally removes the thread from the Threads list.
// Must be called on the exiting thread itself.
void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
assert(this == JavaThread::current(), "thread consistency check");
HandleMark hm(this);
// Capture the pending exception (if any) for the uncaught-exception handler.
Handle uncaught_exception(this, this->pending_exception());
this->clear_pending_exception();
Handle threadObj(this, this->threadObj());
assert(threadObj.not_null(), "Java thread object should be created");
if (get_thread_profiler() != NULL) {
get_thread_profiler()->disengage();
ResourceMark rm;
get_thread_profiler()->print(get_thread_name());
}
{
EXCEPTION_MARK;
CLEAR_PENDING_EXCEPTION;
}
if (!destroy_vm || JDK_Version::is_jdk12x_version()) {
if (uncaught_exception.not_null()) {
Handle group(this, java_lang_Thread::threadGroup(threadObj()));
{
EXCEPTION_MARK;
// Resolve Thread.dispatchUncaughtException against the receiver's class;
// if absent (older class files), fall back to ThreadGroup.uncaughtException.
KlassHandle recvrKlass(THREAD, threadObj->klass());
CallInfo callinfo;
KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
vmSymbols::dispatchUncaughtException_name(),
vmSymbols::throwable_void_signature(),
KlassHandle(), false, false, THREAD);
CLEAR_PENDING_EXCEPTION;
methodHandle method = callinfo.selected_method();
if (method.not_null()) {
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbols::dispatchUncaughtException_name(),
vmSymbols::throwable_void_signature(),
uncaught_exception,
THREAD);
} else {
KlassHandle thread_group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
group, thread_group,
vmSymbols::uncaughtException_name(),
vmSymbols::thread_throwable_void_signature(),
threadObj, // Arg 1
uncaught_exception, // Arg 2
THREAD);
}
if (HAS_PENDING_EXCEPTION) {
// The handler itself threw: report to stderr and swallow.
ResourceMark rm(this);
jio_fprintf(defaultStream::error_stream(),
"\nException: %s thrown from the UncaughtExceptionHandler"
" in thread \"%s\"\n",
pending_exception()->klass()->external_name(),
get_thread_name());
CLEAR_PENDING_EXCEPTION;
}
}
}
JFR_ONLY(Jfr::on_java_thread_dismantle(this);)
// Call java.lang.Thread.exit() so the thread leaves its ThreadGroup.
// Retried up to 3 times while the group link is still set.
if (!is_Compiler_thread()) {
int count = 3;
while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
EXCEPTION_MARK;
JavaValue result(T_VOID);
KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbols::exit_method_name(),
vmSymbols::void_method_signature(),
THREAD);
CLEAR_PENDING_EXCEPTION;
}
}
if (JvmtiExport::should_post_thread_life()) {
JvmtiExport::post_thread_end(this);
}
// Honor any pending external suspend before marking the thread as exiting;
// loop because a new suspend request can arrive while we self-suspend.
while (true) {
{
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
if (!is_external_suspend()) {
set_terminated(_thread_exiting);
ThreadService::current_thread_exiting(this);
break;
}
}
ThreadBlockInVM tbivm(this);
java_suspend_self();
}
} else {
}
// Wake joiners and mark the java.lang.Thread terminated.
ensure_join(this);
assert(!this->has_pending_exception(), "ensure_join should have cleared");
if (exit_type == jni_detach && JNIDetachReleasesMonitors) {
assert(!this->has_last_Java_frame(), "detaching with Java frames?");
ObjectSynchronizer::release_monitors_owned_by_thread(this);
assert(!this->has_pending_exception(), "release_monitors should have cleared");
}
assert(_privileged_stack_top == NULL, "must be NULL when we get here");
JFR_ONLY(Jfr::on_thread_exit(this);)
// Release both JNI handle block lists.
if (active_handles() != NULL) {
JNIHandleBlock* block = active_handles();
set_active_handles(NULL);
JNIHandleBlock::release_block(block);
}
if (free_handle_block() != NULL) {
JNIHandleBlock* block = free_handle_block();
set_free_handle_block(NULL);
JNIHandleBlock::release_block(block);
}
remove_stack_guard_pages();
if (UseTLAB) {
tlab().make_parsable(true); // retire TLAB
}
if (JvmtiEnv::environments_might_exist()) {
JvmtiExport::cleanup_thread(this);
}
// Flush any deferred card mark before the thread disappears.
Universe::heap()->flush_deferred_store_barrier(this);
assert(deferred_card_mark().is_empty(), "Should have been flushed");
#if INCLUDE_ALL_GCS
if (UseG1GC) {
flush_barrier_queues();
}
#endif // INCLUDE_ALL_GCS
// Unlink from the Threads list; after this the thread is invisible to the VM.
Threads::remove(this);
}
#if INCLUDE_ALL_GCS
// Flush this thread's G1 SATB and dirty-card queues into the shared sets.
void JavaThread::flush_barrier_queues() {
satb_mark_queue().flush();
dirty_card_queue().flush();
}
// Activate this thread's SATB queue iff the global queue set is active
// (i.e. a concurrent mark is in progress).  The dirty card queue is expected
// to be permanently active.
void JavaThread::initialize_queues() {
assert(!SafepointSynchronize::is_at_safepoint(),
"we should not be at a safepoint");
ObjPtrQueue& satb_queue = satb_mark_queue();
SATBMarkQueueSet& satb_queue_set = satb_mark_queue_set();
assert(!satb_queue.is_active(), "SATB queue should not be active");
assert(satb_queue.is_empty(), "SATB queue should be empty");
// Match the per-thread queue's active state to the global set's.
if (satb_queue_set.is_active()) {
satb_queue.set_active(true);
}
DirtyCardQueue& dirty_queue = dirty_card_queue();
assert(dirty_queue.is_active(), "dirty card queue should be active");
}
#endif // INCLUDE_ALL_GCS
// Abbreviated teardown used when JNI AttachCurrentThread fails partway:
// mirrors the resource-release tail of exit() (profiler, JNI handle blocks,
// guard pages, TLAB, G1 queues, Threads list) and deletes the thread.
void JavaThread::cleanup_failed_attach_current_thread() {
if (get_thread_profiler() != NULL) {
get_thread_profiler()->disengage();
ResourceMark rm;
get_thread_profiler()->print(get_thread_name());
}
if (active_handles() != NULL) {
JNIHandleBlock* block = active_handles();
set_active_handles(NULL);
JNIHandleBlock::release_block(block);
}
if (free_handle_block() != NULL) {
JNIHandleBlock* block = free_handle_block();
set_free_handle_block(NULL);
JNIHandleBlock::release_block(block);
}
remove_stack_guard_pages();
if (UseTLAB) {
tlab().make_parsable(true); // retire TLAB, if any
}
#if INCLUDE_ALL_GCS
if (UseG1GC) {
flush_barrier_queues();
}
#endif // INCLUDE_ALL_GCS
Threads::remove(this);
delete this;
}
// Returns the "active" JavaThread for the current context: the current thread
// itself when it is a JavaThread, otherwise (on the VM thread) the JavaThread
// that requested the VM operation currently being executed.
JavaThread* JavaThread::active() {
  Thread* current = ThreadLocalStorage::thread();
  assert(current != NULL, "just checking");
  if (current->is_Java_thread()) {
    return (JavaThread*) current;
  }
  // Only the VM thread may call this otherwise; map the in-flight VM
  // operation back to its calling JavaThread.
  assert(current->is_VM_thread(), "this must be a vm thread");
  VM_Operation* op = ((VMThread*) current)->vm_operation();
  JavaThread* result = (op == NULL) ? NULL : (JavaThread*) op->calling_thread();
  assert(result->is_Java_thread(), "must be a Java thread");
  return result;
}
// True when 'adr' lies within this thread's stack (Thread::is_lock_owned) or
// within one of its monitor chunks (lock records moved off-stack by deopt).
bool JavaThread::is_lock_owned(address adr) const {
  if (Thread::is_lock_owned(adr)) {
    return true;
  }
  MonitorChunk* cursor = monitor_chunks();
  while (cursor != NULL) {
    if (cursor->contains(adr)) {
      return true;
    }
    cursor = cursor->next();
  }
  return false;
}
// Push 'chunk' onto the head of this thread's singly-linked monitor chunk list.
void JavaThread::add_monitor_chunk(MonitorChunk* chunk) {
chunk->set_next(monitor_chunks());
set_monitor_chunks(chunk);
}
// Unlink 'chunk' from this thread's monitor chunk list.  The chunk must be
// present; the list must be non-empty.
void JavaThread::remove_monitor_chunk(MonitorChunk* chunk) {
  guarantee(monitor_chunks() != NULL, "must be non empty");
  if (monitor_chunks() == chunk) {
    // Head of the list: just advance the head pointer.
    set_monitor_chunks(chunk->next());
    return;
  }
  // Walk to the predecessor of 'chunk' and splice it out.
  MonitorChunk* pred = monitor_chunks();
  while (pred->next() != chunk) {
    pred = pred->next();
  }
  pred->set_next(chunk->next());
}
// Install any pending asynchronous exception (e.g. from Thread.stop) as this
// thread's pending exception, and optionally turn an async unsafe-access
// fault into a java.lang.InternalError.  Defers delivery when stopped at a
// compiled safepoint in a frame that was just deoptimized.
void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
if (has_last_Java_frame() && has_async_condition()) {
if (is_at_poll_safepoint()) {
// At a compiled-code safepoint poll: if the caller frame has already been
// deoptimized, defer the async exception to the deopt path.
RegisterMap map(this, false);
frame caller_fr = last_frame().sender(&map);
assert(caller_fr.is_compiled_frame(), "what?");
if (caller_fr.is_deoptimized_frame()) {
if (TraceExceptions) {
ResourceMark rm;
tty->print_cr("deferred async exception at compiled safepoint");
}
return;
}
}
}
JavaThread::AsyncRequests condition = clear_special_runtime_exit_condition();
if (condition == _no_async_condition) {
return;
}
if (_pending_async_exception != NULL) {
// Do not overwrite a pending ThreadDeath with another async exception.
if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())) {
set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
if (TraceExceptions) {
ResourceMark rm;
tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", this);
if (has_last_Java_frame() ) {
frame f = last_frame();
tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", f.pc(), f.sp());
}
tty->print_cr(" of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
}
_pending_async_exception = NULL;
clear_has_async_exception();
}
}
if (check_unsafe_error &&
condition == _async_unsafe_access_error && !has_pending_exception()) {
condition = _no_async_condition; // done
// Throw InternalError from the correct thread state; each case sets up the
// appropriate state transition before THROW_MSG.
switch (thread_state()) {
case _thread_in_vm:
{
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
case _thread_in_native:
{
ThreadInVMfromNative tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
case _thread_in_Java:
{
ThreadInVMfromJava tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
}
default:
ShouldNotReachHere();
}
}
assert(condition == _no_async_condition || has_pending_exception() ||
(!check_unsafe_error && condition == _async_unsafe_access_error),
"must have handled the async condition, if no exception");
}
// Handle pending external suspension and (optionally) async exceptions when
// returning from the runtime to Java code.
void JavaThread::handle_special_runtime_exit_condition(bool check_asyncs) {
bool do_self_suspend = is_external_suspend_with_lock();
if (do_self_suspend && (!AllowJNIEnvProxy || this == JavaThread::current())) {
// Make the stack walkable before parking so a suspender can inspect it.
frame_anchor()->make_walkable(this);
java_suspend_self();
}
if (check_asyncs) {
check_and_handle_async_exceptions();
}
JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(this);)
}
// Deliver an asynchronous exception (Thread.stop) to this thread.  Runs on
// the VM thread at a safepoint.  Compiler threads are exempt.
void JavaThread::send_thread_stop(oop java_throwable) {
assert(Thread::current()->is_VM_thread(), "should be in the vm thread");
assert(Threads_lock->is_locked(), "Threads_lock should be locked by safepoint code");
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (is_Compiler_thread()) return;
{
// Never replace an already-pending ThreadDeath.
if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::ThreadDeath_klass())) {
if (has_last_Java_frame()) {
frame f = last_frame();
if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {
// Deoptimize the compiled caller so it notices the async exception
// promptly on return from the runtime stub/safepoint blob.
RegisterMap reg_map(this, UseBiasedLocking);
frame compiled_frame = f.sender(&reg_map);
if (!StressCompiledExceptionHandlers && compiled_frame.can_be_deoptimized()) {
Deoptimization::deoptimize(this, compiled_frame, &reg_map);
}
}
}
set_pending_async_exception(java_throwable);
if (TraceExceptions) {
ResourceMark rm;
tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
}
NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
}
}
// Break the thread out of any wait/sleep/park so it sees the exception.
Thread::interrupt(this);
}
// External suspension entry point.  Bails out if the target is gone/exiting,
// already suspended, or no longer externally suspended; otherwise forces a
// safepoint, during which the target will self-suspend.
void JavaThread::java_suspend() {
{ MutexLocker mu(Threads_lock);
if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
return;
}
}
{ MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
if (!is_external_suspend()) {
// Suspend request was cancelled in the meantime.
return;
}
uint32_t debug_bits = 0;
if (is_ext_suspend_completed(false /* !called_by_wait */,
SuspendRetryDelay, &debug_bits) ) {
// Already suspended - nothing more to do.
return;
}
}
// Use a forced safepoint to get the target to notice the suspend request.
VM_ForceSafepoint vm_suspend;
VMThread::execute(&vm_suspend);
}
// Self-suspend in response to an external suspend request.  Blocks on
// SR_lock until java_resume() clears the suspended state; loops in case a new
// suspend request arrives before we leave.  Returns the number of times the
// thread entered the suspended state.
int JavaThread::java_suspend_self() {
int ret = 0;
if (is_exiting()) {
// An exiting thread refuses suspension.
clear_external_suspend();
return ret;
}
assert(_anchor.walkable() ||
(is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame(),
"must have walkable stack");
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
assert(!this->is_ext_suspended(),
"a thread trying to self-suspend should not already be suspended");
if (this->is_suspend_equivalent()) {
// We are about to really suspend; drop the suspend-equivalent marker.
this->clear_suspend_equivalent();
}
while (is_external_suspend()) {
ret++;
this->set_ext_suspended();
// Wait until java_resume() clears ext_suspended and notifies SR_lock.
while (is_ext_suspended()) {
this->SR_lock()->wait(Mutex::_no_safepoint_check_flag);
}
}
return ret;
}
#ifdef ASSERT
// Debug check: this thread must not yet be on the Threads list.  Takes the
// Threads_lock only if the caller does not already hold it.
void JavaThread::verify_not_published() {
if (!Threads_lock->owned_by_self()) {
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
assert( !Threads::includes(this),
"java thread shouldn't have been published yet!");
}
else {
// Caller already holds Threads_lock; just check.
assert( !Threads::includes(this),
"java thread shouldn't have been published yet!");
}
}
#endif
// Slow path of the native->VM/Java transition: honor external suspension,
// block for an in-progress safepoint, and complete any deferred
// deoptimization requested while the thread was in native code.
void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread) {
assert(thread->thread_state() == _thread_in_native_trans, "wrong state");
JavaThread *curJT = JavaThread::current();
bool do_self_suspend = thread->is_external_suspend();
assert(!curJT->has_last_Java_frame() || curJT->frame_anchor()->walkable(), "Unwalkable stack in native->vm transition");
if (do_self_suspend && (!AllowJNIEnvProxy || curJT == thread)) {
// Self-suspend while appearing _thread_blocked; restore the original
// state afterwards.
JavaThreadState state = thread->thread_state();
thread->set_suspend_equivalent();
thread->set_thread_state(_thread_blocked);
thread->java_suspend_self();
thread->set_thread_state(state);
if (os::is_MP()) {
// Make the state change visible before the safepoint poll below.
if (UseMembar) {
OrderAccess::fence();
} else {
InterfaceSupport::serialize_memory(thread);
}
}
}
if (SafepointSynchronize::do_call_back()) {
SafepointSynchronize::block(curJT);
}
if (thread->is_deopt_suspend()) {
// A deopt was requested while in native: find the frame recorded in
// must_deopt_id() and deoptimize it now.
thread->clear_deopt_suspend();
RegisterMap map(thread, false);
frame f = thread->last_frame();
while ( f.id() != thread->must_deopt_id() && ! f.is_first_frame()) {
f = f.sender(&map);
}
if (f.id() == thread->must_deopt_id()) {
thread->clear_must_deopt_id();
f.deoptimize(thread);
} else {
fatal("missed deoptimization!");
}
}
JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(thread);)
}
// Native-transition slow path plus delivery of pending async exceptions.
void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
check_safepoint_and_suspend_for_native_trans(thread);
if (thread->has_async_exception()) {
// false: do not convert unsafe-access errors here.
thread->check_and_handle_async_exceptions(false);
}
}
// As above, but also completes the transition to _thread_in_Java and performs
// a deferred critical-region unlock requested by the GC locker.
void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
check_special_condition_for_native_trans(thread);
thread->set_thread_state(_thread_in_Java);
if (thread->do_critical_native_unlock()) {
ThreadInVMfromJavaNoAsyncException tiv(thread);
GC_locker::unlock_critical(thread);
thread->clear_critical_native_unlock();
}
}
// Resume an externally suspended thread: clear the request and, if the thread
// actually parked in java_suspend_self(), wake it via SR_lock.
void JavaThread::java_resume() {
assert_locked_or_safepoint(Threads_lock);
// Nothing to do if the thread is gone, exiting, or was never suspended.
if (!Threads::includes(this) || is_exiting() || !is_external_suspend()) {
return;
}
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
clear_external_suspend();
if (is_ext_suspended()) {
clear_ext_suspended();
SR_lock()->notify_all();
}
}
// Install the yellow+red guard zone at the low end of this thread's stack.
// No-op when guard pages are unsupported, already installed, or disabled for
// the primordial thread.
void JavaThread::create_stack_guard_pages() {
if (!os::uses_stack_guard_pages() ||
_stack_guard_state != stack_guard_unused ||
(DisablePrimordialThreadGuardPages && os::is_primordial_thread())) {
if (TraceThreadEvents) {
tty->print_cr("Stack guard page creation for thread "
UINTX_FORMAT " disabled", os::current_thread_id());
}
return;
}
address low_addr = stack_base() - stack_size();
size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
// Some platforms require the guard pages to be separately allocated first.
int allocate = os::allocate_stack_guard_pages();
if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
warning("Attempt to allocate stack guard pages failed.");
return;
}
if (os::guard_memory((char *) low_addr, len)) {
_stack_guard_state = stack_guard_enabled;
} else {
warning("Attempt to protect stack guard pages failed.");
// Protection failed: give back any pages we allocated above.
if (os::uncommit_memory((char *) low_addr, len)) {
warning("Attempt to deallocate stack guard pages failed.");
}
}
}
// Remove this thread's stack guard pages (yellow + red zone).  Must be called
// on the thread itself; a no-op when guard pages were never installed.
void JavaThread::remove_stack_guard_pages() {
  assert(Thread::current() == this, "from different thread");
  if (_stack_guard_state == stack_guard_unused) return;
  address low_addr = stack_base() - stack_size();
  size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
  if (os::allocate_stack_guard_pages()) {
    // Guard pages were separately allocated: return them to the OS.
    if (os::remove_stack_guard_pages((char *) low_addr, len)) {
      _stack_guard_state = stack_guard_unused;
    } else {
      warning("Attempt to deallocate stack guard pages failed.");
    }
  } else {
    // Guard pages live inside the thread's own stack: just drop the
    // protection.  (Removed a duplicate stack_guard_unused check here — the
    // state is tested on entry and nothing in between can change it, so the
    // second check was dead code.)
    if (os::unguard_memory((char *) low_addr, len)) {
      _stack_guard_state = stack_guard_unused;
    } else {
      warning("Attempt to unprotect stack guard pages failed.");
    }
  }
}
// Re-protect the yellow guard zone (after it was disabled to handle an
// overflow).  The zone must lie below both the stack base and the current SP.
void JavaThread::enable_stack_yellow_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_enabled, "already enabled");
address base = stack_yellow_zone_base() - stack_yellow_zone_size();
guarantee(base < stack_base(),"Error calculating stack yellow zone");
guarantee(base < os::current_stack_pointer(),"Error calculating stack yellow zone");
if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
_stack_guard_state = stack_guard_enabled;
} else {
warning("Attempt to guard stack yellow zone failed.");
}
enable_register_stack_guard();
}
// Unprotect the yellow guard zone so an overflow handler can run in it.
void JavaThread::disable_stack_yellow_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_yellow_disabled, "already disabled");
// In product builds (asserts compiled out) this check still protects
// against being called with no guard pages installed.
if (_stack_guard_state == stack_guard_unused) return;
address base = stack_yellow_zone_base() - stack_yellow_zone_size();
if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
_stack_guard_state = stack_guard_yellow_disabled;
} else {
warning("Attempt to unguard stack yellow zone failed.");
}
disable_register_stack_guard();
}
// Protect the red (fatal-overflow) zone at the very bottom of the stack.
// Note: unlike the yellow zone, no _stack_guard_state transition is recorded.
void JavaThread::enable_stack_red_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
address base = stack_red_zone_base() - stack_red_zone_size();
guarantee(base < stack_base(),"Error calculating stack red zone");
guarantee(base < os::current_stack_pointer(),"Error calculating stack red zone");
if(!os::guard_memory((char *) base, stack_red_zone_size())) {
warning("Attempt to guard stack red zone failed.");
}
}
// Unprotect the red zone (used while reporting a fatal stack overflow).
void JavaThread::disable_stack_red_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
address base = stack_red_zone_base() - stack_red_zone_size();
if (!os::unguard_memory((char *)base, stack_red_zone_size())) {
warning("Attempt to unguard stack red zone failed.");
}
}
void JavaThread::frames_do(void f(frame*, const RegisterMap* map)) {
if (!has_last_Java_frame()) return;
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
frame* fr = fst.current();
f(fr, fst.register_map());
}
}
#ifndef PRODUCT
// Debug-only: deoptimize every deoptimizable frame on this thread's stack.
// With -XX:DeoptimizeOnlyAt=<bci-list>, only frames whose scope bci appears
// in the comma/newline-separated list are deoptimized.
void JavaThread::deoptimize() {
StackFrameStream fst(this, UseBiasedLocking);
bool deopt = false; // Dump stack only if a deopt actually happens.
bool only_at = strlen(DeoptimizeOnlyAt) > 0;
for(; !fst.is_done(); fst.next()) {
if(fst.current()->can_be_deoptimized()) {
if (only_at) {
// Format the frame's bci and search for it as a whole token in
// DeoptimizeOnlyAt (delimited by ',' / '\n' / string boundaries).
address pc = fst.current()->pc();
nmethod* nm = (nmethod*) fst.current()->cb();
ScopeDesc* sd = nm->scope_desc_at( pc);
char buffer[8];
jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci());
size_t len = strlen(buffer);
const char * found = strstr(DeoptimizeOnlyAt, buffer);
while (found != NULL) {
if ((found[len] == ',' || found[len] == '\n' || found[len] == '\0') &&
(found == DeoptimizeOnlyAt || found[-1] == ',' || found[-1] == '\n')) {
// Exact token match (not a substring of a longer number).
break;
}
found = strstr(found + 1, buffer);
}
if (!found) {
continue;
}
}
if (DebugDeoptimization && !deopt) {
deopt = true; // One-time only print before deopt
tty->print_cr("[BEFORE Deoptimization]");
trace_frames();
trace_stack();
}
Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
}
}
if (DebugDeoptimization && deopt) {
tty->print_cr("[AFTER Deoptimization]");
trace_frames();
}
}
// Debug-only: mark the nmethod of every deoptimizable frame on this thread's
// stack as not-entrant.
void JavaThread::make_zombies() {
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
if (fst.current()->can_be_deoptimized()) {
// it is a Java nmethod
nmethod* nm = CodeCache::find_nmethod(fst.current()->pc());
nm->make_not_entrant();
}
}
}
#endif // PRODUCT
// Deoptimize every stack frame whose nmethod has been marked for
// deoptimization (frame::should_be_deoptimized()).
void JavaThread::deoptimized_wrt_marked_nmethods() {
if (!has_last_Java_frame()) return;
StackFrameStream fst(this, UseBiasedLocking);
for(; !fst.is_done(); fst.next()) {
if (fst.current()->should_be_deoptimized()) {
if (LogCompilation && xtty != NULL) {
nmethod* nm = fst.current()->cb()->as_nmethod_or_null();
// NOTE(review): this->name() returns a string elsewhere in this file
// (see get_thread_name_string), yet it is formatted with UINTX_FORMAT
// here — looks like a format/argument mismatch; confirm the intended
// log output before changing.
xtty->elem("deoptimized thread='" UINTX_FORMAT "' compile_id='%d'",
this->name(), nm != NULL ? nm->compile_id() : -1);
}
Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
}
}
}
// Run frame::gc_epilogue on every frame of this thread after a GC.
static void frame_gc_epilogue(frame* f, const RegisterMap* map) { f->gc_epilogue(); }
void JavaThread::gc_epilogue() {
frames_do(frame_gc_epilogue);
}
// Run frame::gc_prologue on every frame of this thread before a GC.
static void frame_gc_prologue(frame* f, const RegisterMap* map) { f->gc_prologue(); }
void JavaThread::gc_prologue() {
frames_do(frame_gc_prologue);
}
class RememberProcessedThread: public StackObj {
NamedThread* _cur_thr;
public:
RememberProcessedThread(JavaThread* jthr) {
Thread* thread = Thread::current();
if (thread->is_Named_thread()) {
_cur_thr = (NamedThread *)thread;
_cur_thr->set_processed_thread(jthr);
} else {
_cur_thr = NULL;
}
}
~RememberProcessedThread() {
if (_cur_thr) {
_cur_thr->set_processed_thread(NULL);
}
}
};
// GC root scanning for a JavaThread: visits the privileged stack, the GC
// scratch array, monitor chunks, every stack frame, deferred JVMTI locals,
// and the thread's own oop fields.  Also clears the callee target, which is
// only valid across a single call site.
void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
assert(deferred_card_mark().is_empty(), "Should be empty during GC");
Thread::oops_do(f, cld_f, cf);
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
// Record which thread is being scanned (for named worker threads).
RememberProcessedThread rpt(this);
if (_privileged_stack_top != NULL) {
_privileged_stack_top->oops_do(f);
}
if (_array_for_gc != NULL) {
for (int index = 0; index < _array_for_gc->length(); index++) {
f->do_oop(_array_for_gc->adr_at(index));
}
}
for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
chunk->oops_do(f);
}
// Walk all frames and apply the closures to each.
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->oops_do(f, cld_f, cf, fst.register_map());
}
}
// callee_target is only valid within one call; do not keep it across GC.
set_callee_target(NULL);
assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
GrowableArray<jvmtiDeferredLocalVariableSet*>* list = deferred_locals();
if (list != NULL) {
for (int i = 0; i < list->length(); i++) {
list->at(i)->oops_do(f);
}
}
// The thread's directly-held oops.
f->do_oop((oop*) &_threadObj);
f->do_oop((oop*) &_vm_result);
f->do_oop((oop*) &_exception_oop);
f->do_oop((oop*) &_pending_async_exception);
if (jvmti_thread_state() != NULL) {
jvmti_thread_state()->oops_do(f);
}
}
// Apply 'cf' to the code blobs referenced from every frame on this thread.
void JavaThread::nmethods_do(CodeBlobClosure* cf) {
Thread::nmethods_do(cf); // (super method is a no-op)
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->nmethods_do(cf);
}
}
}
// Apply 'f' to metadata reachable from this thread: either from its stack
// frames, or (for an idle-stacked compiler thread) from the active
// compilation environment.
void JavaThread::metadata_do(void f(Metadata*)) {
Thread::metadata_do(f);
if (has_last_Java_frame()) {
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
fst.current()->metadata_do(f);
}
} else if (is_Compiler_thread()) {
// A compiler thread holds metadata alive via its ciEnv.
CompilerThread* ct = (CompilerThread*)this;
if (ct->env() != NULL) {
ct->env()->metadata_do(f);
}
}
}
// Map a JavaThreadState enum value to its printable name (for diagnostics).
const char* _get_thread_state_name(JavaThreadState _thread_state) {
switch (_thread_state) {
case _thread_uninitialized: return "_thread_uninitialized";
case _thread_new: return "_thread_new";
case _thread_new_trans: return "_thread_new_trans";
case _thread_in_native: return "_thread_in_native";
case _thread_in_native_trans: return "_thread_in_native_trans";
case _thread_in_vm: return "_thread_in_vm";
case _thread_in_vm_trans: return "_thread_in_vm_trans";
case _thread_in_Java: return "_thread_in_Java";
case _thread_in_Java_trans: return "_thread_in_Java_trans";
case _thread_blocked: return "_thread_blocked";
case _thread_blocked_trans: return "_thread_blocked_trans";
default: return "unknown thread state";
}
}
#ifndef PRODUCT
// Print this thread's JavaThreadState name to the given stream (debug only).
void JavaThread::print_thread_state_on(outputStream *st) const {
  st->print_cr(" JavaThread state: %s", _get_thread_state_name(_thread_state));
}

// Convenience wrapper: print the thread state to tty.
void JavaThread::print_thread_state() const {
  print_thread_state_on(tty);
}
// (Removed stray semicolons that followed both function bodies.)
#endif // PRODUCT
// Full diagnostic description of this thread: name, Java-level id/daemon/
// priority, base Thread info, page-aligned last Java SP, Thread.State, and
// (debug builds) the VM thread state and safepoint state.
void JavaThread::print_on(outputStream *st) const {
st->print("\"%s\" ", get_thread_name());
oop thread_oop = threadObj();
if (thread_oop != NULL) {
st->print("#" INT64_FORMAT " ", java_lang_Thread::thread_id(thread_oop));
if (java_lang_Thread::is_daemon(thread_oop)) st->print("daemon ");
st->print("prio=%d ", java_lang_Thread::priority(thread_oop));
}
Thread::print_on(st);
// Print last_Java_sp with the low 12 bits masked off.
st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12));
if (thread_oop != NULL && JDK_Version::is_gte_jdk15x_version()) {
st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop));
}
#ifndef PRODUCT
print_thread_state_on(st);
_safepoint_state->print_on(st);
#endif // PRODUCT
}
// Compact description used by error reporting (hs_err files).  'buf'/'buflen'
// provide caller-supplied scratch space so the thread name can be produced
// without resource allocation.
void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
  st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
  oop thread_obj = threadObj();
  if (thread_obj != NULL) {
    if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
  }
  st->print(" [");
  st->print("%s", _get_thread_state_name(_thread_state));
  if (osthread()) {
    st->print(", id=%d", osthread()->thread_id());
  }
  // Stack range: [base - size, base).
  st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
            _stack_base - _stack_size, _stack_base);
  st->print("]");
  // (Removed a redundant trailing 'return;'.)
}
// Verify all oops reachable from this thread, then verify every stack frame.
static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }
void JavaThread::verify() {
oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
frames_do(frame_verify);
}
// Return this thread's name (resource-allocated).  In debug builds, checks
// that reading another thread's name happens only under Threads_lock or at a
// safepoint.
const char* JavaThread::get_thread_name() const {
#ifdef ASSERT
if (!SafepointSynchronize::is_at_safepoint()) {
Thread *cur = Thread::current();
if (!(cur->is_Java_thread() && cur == this)) {
// Reading a different thread's name requires synchronization.
assert_locked_or_safepoint(Threads_lock);
}
}
#endif // ASSERT
return get_thread_name_string();
}
// Produce this thread's name as a UTF-8 C string.  When 'buf' is supplied the
// name is written there (up to 'buflen'); otherwise it is resource-allocated.
// Falls back to the native Thread::name() when there is no java.lang.Thread
// or it has no name, and to a placeholder while a JNI attach is in progress.
const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
  const char* name_str;
  oop thread_obj = threadObj();
  if (thread_obj == NULL) {
    // No Java-level thread object yet.
    name_str = Thread::name();
  } else {
    oop name = java_lang_Thread::name(thread_obj);
    if (name != NULL) {
      name_str = (buf == NULL)
          ? java_lang_String::as_utf8_string(name)
          : java_lang_String::as_utf8_string(name, buf, buflen);
    } else if (is_attaching_via_jni()) {
      // workaround for 6412693 - see 6404306
      name_str = "<no-name - thread is attaching>";
    } else {
      name_str = Thread::name();
    }
  }
  assert(name_str != NULL, "unexpected NULL thread name");
  return name_str;
}
const char* JavaThread::get_threadgroup_name() const {
debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
oop thread_obj = threadObj();
if (thread_obj != NULL) {
oop thread_group = java_lang_Thread::threadGroup(thread_obj);
if (thread_group != NULL) {
typeArrayOop name = java_lang_ThreadGroup::name(thread_group);
if (name != NULL) {
const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
return str;
}
}
}
return NULL;
}
const char* JavaThread::get_parent_name() const {
debug_only(if (JavaThread::current() != this) assert_locked_or_safepoint(Threads_lock);)
oop thread_obj = threadObj();
if (thread_obj != NULL) {
oop thread_group = java_lang_Thread::threadGroup(thread_obj);
if (thread_group != NULL) {
oop parent = java_lang_ThreadGroup::parent(thread_group);
if (parent != NULL) {
typeArrayOop name = java_lang_ThreadGroup::name(parent);
if (name != NULL) {
const char* str = UNICODE::as_utf8((jchar*) name->base(T_CHAR), name->length());
return str;
}
}
}
}
return NULL;
}
// Return the Java-level priority of this thread; NormPriority while the
// java.lang.Thread object does not exist yet (bootstrapping).
ThreadPriority JavaThread::java_priority() const {
oop thr_oop = threadObj();
if (thr_oop == NULL) return NormPriority; // Bootstrapping
ThreadPriority priority = java_lang_Thread::priority(thr_oop);
assert(MinPriority <= priority && priority <= MaxPriority, "sanity check");
return priority;
}
// Link this JavaThread with its java.lang.Thread object, set its priority
// (taking it from the Java object when 'prio' is NoPriority), and publish it
// on the Threads list.  Caller must hold the Threads_lock.
void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
Handle thread_oop(Thread::current(),
JNIHandles::resolve_non_null(jni_thread));
assert(InstanceKlass::cast(thread_oop->klass())->is_linked(),
"must be initialized");
// Cross-link the VM thread and the Java thread object.
set_threadObj(thread_oop());
java_lang_Thread::set_thread(thread_oop(), this);
if (prio == NoPriority) {
// Inherit the priority already set on the Java object.
prio = java_lang_Thread::priority(thread_oop());
assert(prio != NoPriority, "A valid priority should be present");
}
Thread::set_priority(this, prio);
prepare_ext();
// Publish: from here on the thread is visible to VM operations.
Threads::add(this);
}
// Return the java.lang.Thread parkBlocker object for this thread, or NULL
// when there is no thread oop yet or the running JDK does not support the
// park-blocker field.
oop JavaThread::current_park_blocker() {
  oop thread_oop = threadObj();
  if (thread_oop == NULL) {
    return NULL;
  }
  if (!JDK_Version::current().supports_thread_park_blocker()) {
    return NULL;
  }
  return java_lang_Thread::park_blocker(thread_oop);
}
// Print this thread's Java stack trace (one line per Java frame, plus lock
// info when JavaMonitorsInStackTrace), truncated at MaxJavaStackTraceDepth.
void JavaThread::print_stack_on(outputStream* st) {
if (!has_last_Java_frame()) return;
ResourceMark rm;
HandleMark hm;
RegisterMap reg_map(this);
vframe* start_vf = last_java_vframe(&reg_map);
int count = 0;
for (vframe* f = start_vf; f; f = f->sender() ) {
if (f->is_java_frame()) {
javaVFrame* jvf = javaVFrame::cast(f);
java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
if (JavaMonitorsInStackTrace) {
jvf->print_lock_info_on(st, count);
}
} else {
// Non-Java frames (stubs, runtime frames) are skipped silently.
}
count++;
if (MaxJavaStackTraceDepth == count) return;
}
}
// Save 'size_in_bytes' bytes of outgoing arguments starting at 'start' into a
// C-heap buffer so a JVMTI PopFrame can restore them.  May only be called
// once before popframe_free_preserved_args().
void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments");
if (in_bytes(size_in_bytes) != 0) {
_popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes), mtThread);
_popframe_preserved_args_size = in_bytes(size_in_bytes);
Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
}
}
// Accessors for the saved PopFrame argument buffer and its size.
void* JavaThread::popframe_preserved_args() {
return _popframe_preserved_args;
}
ByteSize JavaThread::popframe_preserved_args_size() {
return in_ByteSize(_popframe_preserved_args_size);
}
// Saved PopFrame argument size in machine words (must divide evenly).
WordSize JavaThread::popframe_preserved_args_size_in_words() {
int sz = in_bytes(popframe_preserved_args_size());
assert(sz % wordSize == 0, "argument size must be multiple of wordSize");
return in_WordSize(sz / wordSize);
}
// Release the preserved PopFrame argument buffer and reset the bookkeeping
// fields. Calling this twice in a row is a bug (asserted).
void JavaThread::popframe_free_preserved_args() {
assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
FREE_C_HEAP_ARRAY(char, (char*) _popframe_preserved_args, mtThread);
_popframe_preserved_args = NULL;
_popframe_preserved_args_size = 0;
}
#ifndef PRODUCT
// Debug aid (non-product): print a one-line description of every physical
// frame on this thread's stack to tty, numbered from 1 (top of stack).
void JavaThread::trace_frames() {
  tty->print_cr("[Describe stack]");
  int index = 1;
  StackFrameStream fst(this);
  while (!fst.is_done()) {
    tty->print(" %d. ", index);
    index++;
    fst.current()->print_value_on(tty, this);
    tty->cr();
    fst.next();
  }
}
// Closure that prints every visited oop location and its referent, and
// flags referents that fail is_oop_or_null() as invalid. Used by the
// trace_oops() debug aid.
class PrintAndVerifyOopClosure: public OopClosure {
protected:
// Shared implementation for both wide and narrow oop slots.
template <class T> inline void do_oop_work(T* p) {
// Decode a possibly-narrow oop; NULL entries are skipped silently.
oop obj = oopDesc::load_decode_heap_oop(p);
if (obj == NULL) return;
tty->print(INTPTR_FORMAT ": ", p);
if (obj->is_oop_or_null()) {
if (obj->is_objArray()) {
// Object arrays are summarized by address rather than fully printed.
tty->print_cr("valid objArray: " INTPTR_FORMAT, (oopDesc*) obj);
} else {
obj->print();
}
} else {
tty->print_cr("invalid oop: " INTPTR_FORMAT, (oopDesc*) obj);
}
tty->cr();
}
public:
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
// Debug helper: print the frame's value line, then print-and-verify every
// oop slot in the frame via PrintAndVerifyOopClosure.
//
// FIX: replaced the C-style cast "(RegisterMap*)map" with an explicit
// const_cast — the only thing the cast does is drop const (oops_do takes a
// mutable RegisterMap*), and a named cast makes that intent greppable.
static void oops_print(frame* f, const RegisterMap *map) {
  PrintAndVerifyOopClosure print;
  f->print_value();
  f->oops_do(&print, NULL, NULL, const_cast<RegisterMap*>(map));
}
// Debug aid (non-product): print and verify the oops in every frame of
// this thread's stack (see oops_print above).
void JavaThread::trace_oops() {
tty->print_cr("[Trace oops]");
frames_do(oops_print);
}
#ifdef ASSERT
// Debug aid (ASSERT builds): record the layout of each frame on this
// thread's stack into a FrameValues table. With validate_only the recorded
// values are checked for consistency; otherwise the layout is printed to
// tty. A non-zero 'depth' limits how many frames are walked.
void JavaThread::print_frame_layout(int depth, bool validate_only) {
  ResourceMark rm;
  PRESERVE_EXCEPTION_MARK;
  FrameValues values;
  int walked = 0;
  for (StackFrameStream fst(this, false); !fst.is_done(); fst.next()) {
    walked++;
    fst.current()->describe(values, walked);
    if (walked == depth) break;
  }
  if (validate_only) {
    values.validate();
    return;
  }
  tty->print_cr("[Describe stack layout]");
  values.print(this);
}
#endif
// Debug aid (non-product): print activations starting at start_vf and
// walking toward the caller, stopping once StackPrintLimit Java
// activations have been printed. Non-Java frames are printed but do not
// count toward the limit.
void JavaThread::trace_stack_from(vframe* start_vf) {
  ResourceMark rm;
  int activation_no = 1;
  vframe* vf = start_vf;
  while (vf != NULL) {
    if (vf->is_java_frame()) {
      javaVFrame::cast(vf)->print_activation(activation_no);
      activation_no++;
    } else {
      vf->print();
    }
    if (activation_no > StackPrintLimit) {
      tty->print_cr("...<more frames>...");
      return;
    }
    vf = vf->sender();
  }
}
void JavaThread::trace_stack() {
if (!has_last_Java_frame()) return;
ResourceMark rm;
HandleMark hm;
RegisterMap reg_map(this);
trace_stack_from(last_java_vframe(®_map));
}
#endif // PRODUCT
// Walk the vframes starting at this thread's last frame and return the
// first Java vframe found, or NULL if there is none. A register map must
// be supplied (asserted).
javaVFrame* JavaThread::last_java_vframe(RegisterMap *reg_map) {
  assert(reg_map != NULL, "a map must be given");
  frame top = last_frame();
  vframe* vf = vframe::new_vframe(&top, reg_map, this);
  while (vf != NULL) {
    if (vf->is_java_frame()) {
      return javaVFrame::cast(vf);
    }
    vf = vf->sender();
  }
  return NULL;
}
// Return the class holding the method 'depth' caller frames up the stack
// (skipping frames per vframeStream::security_get_caller_frame), or NULL
// if the stack is not deep enough.
Klass* JavaThread::security_get_caller_class(int depth) {
  vframeStream vfst(this);
  vfst.security_get_caller_frame(depth);
  if (vfst.at_end()) {
    return NULL;
  }
  return vfst.method()->method_holder();
}
// Thread entry point for compiler threads: hands control to the compile
// broker's worker loop and never returns under normal operation.
static void compiler_thread_entry(JavaThread* thread, TRAPS) {
assert(thread->is_Compiler_thread(), "must be compiler thread");
CompileBroker::compiler_thread_loop();
}
// Construct a CompilerThread servicing the given compile queue with the
// given performance counters. All per-compilation state starts out NULL
// and is filled in by the compile broker as tasks are processed.
CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters)
: JavaThread(&compiler_thread_entry) {
_env = NULL;
_log = NULL;
_task = NULL;
_queue = queue;
_counters = counters;
_buffer_blob = NULL;
_scanned_nmethod = NULL;
_compiler = NULL;
// Attribute this thread's resource-area allocations to compiler memory (NMT).
resource_area()->bias_to(mtCompiler);
#ifndef PRODUCT
_ideal_graph_printer = NULL;
#endif
}
// GC support: process the usual JavaThread roots, then also hand the
// nmethod currently being scanned (if any) to the code-blob closure so it
// is kept visible while this thread works on it.
void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
  JavaThread::oops_do(f, cld_f, cf);
  if (cf == NULL) {
    return;
  }
  if (_scanned_nmethod == NULL) {
    return;
  }
  cf->do_code_blob(_scanned_nmethod);
}
// Static state for the global list of Java threads. The list and counters
// are accessed under the Threads_lock (or at a safepoint) — see the
// assert_locked_or_safepoint in threads_do below.
JavaThread* Threads::_thread_list = NULL;
int Threads::_number_of_threads = 0;
int Threads::_number_of_non_daemon_threads = 0;
int Threads::_return_code = 0;
// NOTE(review): presumably the stack size used for threads the VM creates
// when none is requested — confirm against callers.
size_t JavaThread::_stack_size_at_create = 0;
#ifdef ASSERT
// Set once create_vm() has fully completed (debug builds only).
bool Threads::_vm_complete = false;
#endif
// Iterate over all JavaThreads via the intrusive next() link.
#define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
// Apply the closure to every thread the VM knows about: all JavaThreads,
// the VM thread, the GC worker threads, the watcher thread (if it exists)
// and, when JFR is built in, the JFR sampler thread (if running).
// Caller must hold the Threads_lock or be at a safepoint (asserted).
void Threads::threads_do(ThreadClosure* tc) {
assert_locked_or_safepoint(Threads_lock);
ALL_JAVA_THREADS(p) {
tc->do_thread(p);
}
tc->do_thread(VMThread::vm_thread());
Universe::heap()->gc_threads_do(tc);
// The watcher thread may not exist; guard against NULL.
WatcherThread *wt = WatcherThread::watcher_thread();
if (wt != NULL)
tc->do_thread(wt);
#if INCLUDE_JFR
Thread* sampler_thread = Jfr::sampler_thread();
if (sampler_thread != NULL) {
tc->do_thread(sampler_thread);
}
#endif
}
// Create and initialize the Java Virtual Machine. Called once from the JNI
// invocation API. Returns JNI_OK on success or a JNI error code. The steps
// below are strictly order-dependent; do not reorder without understanding
// each dependency.
jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
extern void JDK_Version_init();
// CPU/feature detection must run before anything relies on it.
VM_Version::early_initialize();
// Check the requested JNI version before doing any real work.
if (!is_supported_jni_version(args->version)) return JNI_EVERSION;
// Output streams, launcher properties and OS basics must precede argument
// parsing, which may print and reads system properties.
ostream_init();
Arguments::process_sun_java_launcher_properties(args);
os::init();
Arguments::init_system_properties();
JDK_Version_init();
Arguments::init_version_specific_system_properties();
jint parse_result = Arguments::parse(args);
if (parse_result != JNI_OK) return parse_result;
// Ergonomics: OS hooks first, then flag adjustment based on parsed args.
os::init_before_ergo();
jint ergo_result = Arguments::apply_ergo();
if (ergo_result != JNI_OK) return ergo_result;
// Debugging aid: lets a developer attach a native debugger at startup.
if (PauseAtStartup) {
os::pause();
}
#ifndef USDT2
HS_DTRACE_PROBE(hotspot, vm__init__begin);
#else /* USDT2 */
HOTSPOT_VM_INIT_BEGIN();
#endif /* USDT2 */
// Timers measuring total VM creation time.
TraceVmCreationTime create_vm_timer;
create_vm_timer.start();
TraceTime timer("Create VM", TraceStartupTime);
// Second phase of OS initialization, now that arguments are known.
jint os_init_2_result = os::init_2();
if (os_init_2_result != JNI_OK) return os_init_2_result;
jint adjust_after_os_result = Arguments::adjust_after_os();
if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
// TLS must be up before the first thread object is initialized.
ThreadLocalStorage::init();
ostream_init_log();
// -Xrun libraries with an Agent_OnLoad are converted to agents and loaded
// now; JVMTI agents follow.
if (Arguments::init_libraries_at_startup()) {
convert_vm_init_libraries_to_agents();
}
if (Arguments::init_agents_at_startup()) {
create_vm_init_agents();
}
// Reset the global thread-list state before creating the main thread.
_thread_list = NULL;
_number_of_threads = 0;
_number_of_non_daemon_threads = 0;
vm_init_globals();
// Attach the current (launcher) thread as the main JavaThread.
JavaThread* main_thread = new JavaThread();
main_thread->set_thread_state(_thread_in_vm);
main_thread->record_stack_base_and_size();
main_thread->initialize_thread_local_storage();
main_thread->set_active_handles(JNIHandleBlock::allocate_block());
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(
"Failed necessary internal allocation. Out of swap space");
delete main_thread;
return JNI_ENOMEM;
}
main_thread->create_stack_guard_pages();
ObjectMonitor::Initialize() ;
// Initialize all global VM subsystems (heap, code cache, interpreter...).
jint status = init_globals();
if (status != JNI_OK) {
delete main_thread;
return status;
}
JFR_ONLY(Jfr::on_vm_init();)
main_thread->cache_global_variables();
HandleMark hm;
// The main thread is only added to the thread list under the Threads_lock.
{ MutexLocker mu(Threads_lock);
Threads::add(main_thread);
}
JvmtiExport::transition_pending_onload_raw_monitors();
// Create and start the VMThread, then wait until it is fully running.
{ TraceTime timer("Start VMThread", TraceStartupTime);
VMThread::create();
Thread* vmthread = VMThread::vm_thread();
if (!os::create_thread(vmthread, os::vm_thread))
vm_exit_during_initialization("Cannot create VM thread. Out of system resources.");
{
MutexLocker ml(Notify_lock);
os::start_thread(vmthread);
// The VMThread signals readiness by installing its active handles.
while (vmthread->active_handles() == NULL) {
Notify_lock->wait();
}
}
}
assert (Universe::is_fully_initialized(), "not initialized");
if (VerifyDuringStartup) {
VM_Verify verify_op;
VMThread::execute(&verify_op);
}
EXCEPTION_MARK;
// -Xshare:dump: write the shared archive and exit inside the call.
if (DumpSharedSpaces) {
MetaspaceShared::preload_and_dump(CHECK_0);
ShouldNotReachHere();
}
// JVMTI "start" phase begins here.
JvmtiExport::enter_start_phase();
JvmtiExport::post_vm_start();
// Bootstrap the core java.lang classes; the order below matters.
{
TraceTime timer("Initialize java.lang classes", TraceStartupTime);
if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
create_vm_init_libraries();
}
initialize_class(vmSymbols::java_lang_String(), CHECK_0);
initialize_class(vmSymbols::java_lang_System(), CHECK_0);
initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0);
Handle thread_group = create_initial_thread_group(CHECK_0);
Universe::set_main_thread_group(thread_group());
initialize_class(vmSymbols::java_lang_Thread(), CHECK_0);
// Give the main thread its java.lang.Thread object, marked RUNNABLE.
oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
main_thread->set_threadObj(thread_object);
java_lang_Thread::set_thread_status(thread_object,
java_lang_Thread::RUNNABLE);
initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK_0);
call_initializeSystemClass(CHECK_0);
JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
// Pre-initialize common exception/error classes so they can be thrown
// even under resource exhaustion later.
initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK_0);
initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK_0);
initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK_0);
initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK_0);
initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
}
initialize_class(vmSymbols::java_lang_Compiler(), CHECK_0);
reset_vm_info_property(CHECK_0);
quicken_jni_functions();
// Mark basic initialization complete; many asserts key off this state.
set_init_completed();
Metaspace::post_initialize();
#ifndef USDT2
HS_DTRACE_PROBE(hotspot, vm__init__end);
#else /* USDT2 */
HOTSPOT_VM_INIT_END();
#endif /* USDT2 */
#if INCLUDE_MANAGEMENT
Management::record_vm_init_completed();
#endif // INCLUDE_MANAGEMENT
SystemDictionary::compute_java_system_loader(THREAD);
if (HAS_PENDING_EXCEPTION) {
vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
}
#if INCLUDE_ALL_GCS
// CMS/G1 need a surrogate locker thread for concurrent collection.
if (UseConcMarkSweepGC || UseG1GC) {
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
} else {
ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
}
if (HAS_PENDING_EXCEPTION) {
vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
}
}
#endif // INCLUDE_ALL_GCS
// JVMTI "live" phase: full VM functionality is available to agents.
JvmtiExport::enter_live_phase();
os::signal_init();
if (!DisableAttachMechanism) {
AttachListener::vm_start();
if (StartAttachListener || AttachListener::init_at_startup()) {
AttachListener::init();
}
}
// Non-eager -Xrun libraries are initialized only now.
if (!EagerXrunInit && Arguments::init_libraries_at_startup()) {
create_vm_init_libraries();
}
JvmtiExport::post_vm_initialized();
JFR_ONLY(Jfr::on_vm_start();)
if (CleanChunkPoolAsync) {
Chunk::start_chunk_pool_cleaner_task();
}
#if defined(COMPILER1) || defined(COMPILER2) || defined(SHARK)
CompileBroker::compilation_init();
#endif
if (EnableInvokeDynamic) {
// Pre-initialize the method-handle classes to avoid deadlocks during
// their first use from compiled code.
initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
}
#if INCLUDE_MANAGEMENT
Management::initialize(THREAD);
#endif // INCLUDE_MANAGEMENT
if (HAS_PENDING_EXCEPTION) {
// Management startup failure is fatal.
vm_exit(1);
}
// Optional profilers and periodic samplers.
if (Arguments::has_profile()) FlatProfiler::engage(main_thread, true);
if (MemProfiling) MemProfiler::engage();
StatSampler::engage();
if (CheckJNICalls) JniPeriodicChecker::engage();
BiasedLocking::init();
#if INCLUDE_RTM_OPT
RTMLockingCounters::init();
#endif
if (JDK_Version::current().post_vm_init_hook_enabled()) {
call_postVMInitHook(THREAD);
// The post-init hook is best effort; failures are swallowed.
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
}
// Allow the watcher thread to start, and start it if tasks are pending.
{
MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
WatcherThread::make_startable();
if (PeriodicTask::num_tasks() > 0) {
WatcherThread::start();
}
}
create_vm_timer.end();
#ifdef ASSERT
_vm_complete = true;
#endif
return JNI_OK;
}
extern "C" {
// Signature of an agent library's on-load entry point (looked up by
// lookup_on_load below): (vm, options string, reserved) -> JNI error code.
typedef jint (JNICALL *OnLoadEntry_t)(JavaVM *, char *, void *);
}
// Resolve the on-load entry point of an agent library. If the library is
// not yet loaded, load it: first try a statically linked (builtin) agent,
// then the given absolute path, then the JVM's dll directory, and finally
// the current directory. Any load failure exits the VM with a descriptive
// message. Returns the entry point, or NULL if no matching symbol exists.
static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) {
OnLoadEntry_t on_load_entry = NULL;
void *library = NULL;
if (!agent->valid()) {
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
const char *name = agent->name();
const char *msg = "Could not find agent library ";
// Statically linked agents need no dll_load.
if (os::find_builtin_agent(agent, on_load_symbols, num_symbol_entries)) {
library = agent->os_lib();
} else if (agent->is_absolute_path()) {
library = os::dll_load(name, ebuf, sizeof ebuf);
if (library == NULL) {
const char *sub_msg = " in absolute path, with error: ";
size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
// NOTE(review): vm_exit_during_initialization appears not to return,
// which would make the FREE below unreachable — confirm.
vm_exit_during_initialization(buf, NULL);
FREE_C_HEAP_ARRAY(char, buf, mtThread);
}
} else {
// Relative name: try the JVM's default dll directory first.
if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
name)) {
library = os::dll_load(buffer, ebuf, sizeof ebuf);
}
if (library == NULL) { // Try the local directory
char ns[1] = {0};
if (os::dll_build_name(buffer, sizeof(buffer), ns, name)) {
library = os::dll_load(buffer, ebuf, sizeof ebuf);
}
if (library == NULL) {
const char *sub_msg = " on the library path, with error: ";
size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
vm_exit_during_initialization(buf, NULL);
FREE_C_HEAP_ARRAY(char, buf, mtThread);
}
}
}
// Cache the loaded library handle so subsequent lookups skip loading.
agent->set_os_lib(library);
agent->set_valid();
}
// Look up the first matching on-load symbol in the (now loaded) library.
on_load_entry =
CAST_TO_FN_PTR(OnLoadEntry_t, os::find_agent_function(agent,
false,
on_load_symbols,
num_symbol_entries));
return on_load_entry;
}
// NOTE(review): removed trailing non-code scrape residue that would not
// compile (a stray token "sssssssss78" and a blog-site banner line).