#endif // COMPILER2 || SHARK
C:\hotspot-69087d08d473\src\share\vm/runtime/deoptimization.hpp
#ifndef SHARE_VM_RUNTIME_DEOPTIMIZATION_HPP
#define SHARE_VM_RUNTIME_DEOPTIMIZATION_HPP
#include "memory/allocation.hpp"
#include "runtime/frame.inline.hpp"
class ProfileData;
class vframeArray;
class MonitorValue;
class ObjectValue;
// Deoptimization: all-static support for transferring execution from a
// compiled (nmethod) frame back into equivalent interpreter frames.
// It defines why a deopt happened (DeoptReason), what to do with the
// nmethod afterwards (DeoptAction), how both are packed into a single
// "trap request" int, and the UnrollBlock record that the platform
// deopt blobs use to rebuild interpreter frames on the stack.
class Deoptimization : AllStatic {
friend class VMStructs;
public:
// Reasons a compiled activation is abandoned.  Several are recorded
// per-bytecode in the MethodData (see Reason_RECORDED_LIMIT below).
enum DeoptReason {
Reason_many = -1, // indicates presence of several reasons
Reason_none = 0, // indicates absence of a relevant deopt.
Reason_null_check, // saw unexpected null or zero divisor (@bci)
Reason_null_assert, // saw unexpected non-null or non-zero (@bci)
Reason_range_check, // saw unexpected array index (@bci)
Reason_class_check, // saw unexpected object class (@bci)
Reason_array_check, // saw unexpected array class (aastore @bci)
Reason_intrinsic, // saw unexpected operand to intrinsic (@bci)
Reason_bimorphic, // saw unexpected object class in bimorphic inlining (@bci)
Reason_unloaded, // unloaded class or constant pool entry
Reason_uninitialized, // bad class state (uninitialized)
Reason_unreached, // code is not reached, compiler
Reason_unhandled, // arbitrary compiler limitation
Reason_constraint, // arbitrary runtime constraint violated
Reason_div0_check, // a null_check due to division by zero
Reason_age, // nmethod too old; tier threshold reached
Reason_predicate, // compiler generated predicate failed
Reason_loop_limit_check, // compiler generated loop limits check failed
Reason_speculate_class_check, // saw unexpected object class from type speculation
Reason_rtm_state_change, // rtm state change detected
Reason_unstable_if, // a branch predicted always false was taken
Reason_LIMIT,
Reason_RECORDED_LIMIT = Reason_bimorphic // some are not recorded per bc
};
// What should happen to the nmethod after the trap fires.
enum DeoptAction {
Action_none, // just interpret, do not invalidate nmethod
Action_maybe_recompile, // recompile the nmethod; need not invalidate
Action_reinterpret, // invalidate the nmethod, reset IC, maybe recompile
Action_make_not_entrant, // invalidate the nmethod, recompile (probably)
Action_make_not_compilable, // invalidate the nmethod and do not compile
Action_LIMIT
};
// Bit layout of the packed trap request int (see make_trap_request):
// action in the low _action_bits, reason above it.
enum {
_action_bits = 3,
_reason_bits = 5,
_action_shift = 0,
_reason_shift = _action_shift+_action_bits,
BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist
};
// Execution mode passed to unpack_frames: how the interpreter should
// resume in the freshly-built frames.
enum UnpackType {
Unpack_deopt = 0, // normal deoptimization, use pc computed in unpack_vframe_on_stack
Unpack_exception = 1, // exception is pending
Unpack_uncommon_trap = 2, // redo last byte code (C2 only)
Unpack_reexecute = 3 // reexecute bytecode (C1 only)
};
static int deoptimize_dependents();
// Deoptimizes the given frame of the given thread in place.
static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
private:
static void deoptimize_single_frame(JavaThread* thread, frame fr);
static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map);
static void revoke_biases_of_monitors(CodeBlob* cb);
#ifdef COMPILER2
// Support for restoring scalar-replaced objects and re-locking
// elided monitors when escape-analyzed code deoptimizes.
static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
#endif // COMPILER2
public:
static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
// UnrollBlock: heap-allocated record describing how to replace one
// deoptimized compiled frame with a series of interpreter frames.
// Read from assembly by the deopt/uncommon-trap blobs, hence the
// *_offset_in_bytes accessors below.
class UnrollBlock : public CHeapObj<mtCompiler> {
private:
int _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame
int _caller_adjustment; // Adjustment, in bytes, to caller's SP by initial interpreted frame
int _number_of_frames; // Number frames to unroll
int _total_frame_sizes; // Total of number*sizes frames
intptr_t* _frame_sizes; // Array of frame sizes, in bytes, for unrolling the stack
address* _frame_pcs; // Array of frame pc's, in bytes, for unrolling the stack
intptr_t* _register_block; // Block for storing callee-saved registers.
BasicType _return_type; // Tells if we have to restore double or long return value
intptr_t _initial_info; // Platform dependent data for the sender frame (was FP on x86)
int _caller_actual_parameters; // The number of actual arguments at the
intptr_t _counter_temp; // SHOULD BE PD VARIABLE (x86 frame count temp)
intptr_t _unpack_kind; // SHOULD BE PD VARIABLE (x86 unpack kind)
intptr_t _sender_sp_temp; // SHOULD BE PD VARIABLE (x86 sender_sp)
public:
UnrollBlock(int size_of_deoptimized_frame,
int caller_adjustment,
int caller_actual_parameters,
int number_of_frames,
intptr_t* frame_sizes,
address* frames_pcs,
BasicType return_type);
~UnrollBlock();
// Returns the address of the saved value for the given register.
intptr_t* value_addr_at(int register_number) const;
intptr_t* frame_sizes() const { return _frame_sizes; }
int number_of_frames() const { return _number_of_frames; }
address* frame_pcs() const { return _frame_pcs ; }
int size_of_frames() const;
void set_initial_info(intptr_t info) { _initial_info = info; }
int caller_actual_parameters() const { return _caller_actual_parameters; }
// Field offsets for use by generated (assembly) code.
static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); }
static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); }
static int number_of_frames_offset_in_bytes() { return offset_of(UnrollBlock, _number_of_frames); }
static int frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _frame_sizes); }
static int total_frame_sizes_offset_in_bytes() { return offset_of(UnrollBlock, _total_frame_sizes); }
static int frame_pcs_offset_in_bytes() { return offset_of(UnrollBlock, _frame_pcs); }
static int register_block_offset_in_bytes() { return offset_of(UnrollBlock, _register_block); }
static int return_type_offset_in_bytes() { return offset_of(UnrollBlock, _return_type); }
static int counter_temp_offset_in_bytes() { return offset_of(UnrollBlock, _counter_temp); }
static int initial_info_offset_in_bytes() { return offset_of(UnrollBlock, _initial_info); }
static int unpack_kind_offset_in_bytes() { return offset_of(UnrollBlock, _unpack_kind); }
static int sender_sp_temp_offset_in_bytes() { return offset_of(UnrollBlock, _sender_sp_temp); }
BasicType return_type() const { return _return_type; }
void print();
};
// Runtime entry points called from the deopt blobs.
static UnrollBlock* fetch_unroll_info(JavaThread* thread);
static BasicType unpack_frames(JavaThread* thread, int exec_mode);
static void cleanup_deopt_info(JavaThread *thread,
vframeArray * array);
static void unwind_callee_save_values(frame* f, vframeArray* vframe_array);
static UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
static void uncommon_trap_inner(JavaThread* thread, jint unloaded_class_index);
static void deoptimize_frame_internal(JavaThread* thread, intptr_t* id);
static void deoptimize_frame(JavaThread* thread, intptr_t* id);
// Statistics
static void gather_statistics(DeoptReason reason, DeoptAction action,
Bytecodes::Code bc = Bytecodes::_illegal);
static void print_statistics();
static int last_frame_adjust(int callee_parameters, int callee_locals);
// Trap-request encoding.  A negative trap_request is the bitwise
// complement of (reason << _reason_shift) + (action << _action_shift);
// a non-negative value is a constant-pool index for Reason_unloaded.
static DeoptReason trap_request_reason(int trap_request) {
if (trap_request < 0)
return (DeoptReason)
((~(trap_request) >> _reason_shift) & right_n_bits(_reason_bits));
else
// Non-negative requests carry a CP index: class unloading.
return Reason_unloaded;
}
static DeoptAction trap_request_action(int trap_request) {
if (trap_request < 0)
return (DeoptAction)
((~(trap_request) >> _action_shift) & right_n_bits(_action_bits));
else
return _unloaded_action;
}
// Returns the constant-pool index carried by the request, or -1.
static int trap_request_index(int trap_request) {
if (trap_request < 0)
return -1;
else
return trap_request;
}
static int make_trap_request(DeoptReason reason, DeoptAction action,
int index = -1) {
assert((1 << _reason_bits) >= Reason_LIMIT, "enough bits");
assert((1 << _action_bits) >= Action_LIMIT, "enough bits");
int trap_request;
if (index != -1)
trap_request = index;
else
trap_request = (~(((reason) << _reason_shift)
+ ((action) << _action_shift)));
assert(reason == trap_request_reason(trap_request), "valid reason");
assert(action == trap_request_action(trap_request), "valid action");
assert(index == trap_request_index(trap_request), "valid index");
return trap_request;
}
// Per-bytecode trap-state queries (state stored in the MDO).
static DeoptReason trap_state_reason(int trap_state);
static int trap_state_has_reason(int trap_state, int reason);
static int trap_state_add_reason(int trap_state, int reason);
static bool trap_state_is_recompiled(int trap_state);
static int trap_state_set_recompiled(int trap_state, bool z);
static const char* format_trap_state(char* buf, size_t buflen,
int trap_state);
static bool reason_is_recorded_per_bytecode(DeoptReason reason) {
return reason > Reason_none && reason <= Reason_RECORDED_LIMIT;
}
// Maps a reason onto the per-bytecode reason it is recorded under,
// or Reason_none when it is not recorded per bci.
static DeoptReason reason_recorded_per_bytecode_if_any(DeoptReason reason) {
if (reason_is_recorded_per_bytecode(reason))
return reason;
else if (reason == Reason_div0_check) // null check due to divide-by-zero?
return Reason_null_check; // recorded per BCI as a null check
else if (reason == Reason_speculate_class_check)
return Reason_class_check;
else if (reason == Reason_unstable_if)
return Reason_intrinsic;
else
return Reason_none;
}
static bool reason_is_speculate(int reason) {
if (reason == Reason_speculate_class_check) {
return true;
}
return false;
}
// Speculative traps get their own (typically larger) per-method limit.
static uint per_method_trap_limit(int reason) {
return reason_is_speculate(reason) ? (uint)PerMethodSpecTrapLimit : (uint)PerMethodTrapLimit;
}
static const char* trap_reason_name(int reason);
static const char* trap_action_name(int action);
static const char* format_trap_request(char* buf, size_t buflen,
int trap_request);
static jint total_deoptimization_count();
static jint deoptimization_count(DeoptReason reason);
// JVMTI PopFrame support: preserve incoming arguments across the pop.
static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);
private:
static MethodData* get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
// Update the MDO for a trap at trap_bci; returns the ProfileData
// record updated (if any) plus prior-trap/recompile hints by ref.
static ProfileData* query_update_method_data(MethodData* trap_mdo,
int trap_bci,
DeoptReason reason,
Method* compiled_method,
uint& ret_this_trap_count,
bool& ret_maybe_prior_trap,
bool& ret_maybe_prior_recompile);
static void load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS);
static void load_class_by_index(constantPoolHandle constant_pool, int index);
static UnrollBlock* fetch_unroll_info_helper(JavaThread* thread);
static DeoptAction _unloaded_action; // == Action_reinterpret;
static const char* _trap_reason_name[Reason_LIMIT];
static const char* _trap_action_name[Action_LIMIT];
static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
public:
static void update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason);
};
// Scoped marker: while an instance is alive, a deoptimization is in
// progress (queried by the flat profiler).  NOTE(review): the flag is
// a plain static bool shared by all threads -- concurrent markers will
// interleave; acceptable for profiling-only use.
class DeoptimizationMarker : StackObj { // for profiling
static bool _is_active;
public:
DeoptimizationMarker() { _is_active = true; }
~DeoptimizationMarker() { _is_active = false; }
static bool is_active() { return _is_active; }
};
#endif // SHARE_VM_RUNTIME_DEOPTIMIZATION_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/dtraceJSDT.cpp
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "prims/jvm.h"
#include "runtime/dtraceJSDT.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/utf8.hpp"
#ifdef HAVE_DTRACE_H
// Registers the given JSDT providers/probes with DTrace.  Compiles a
// dtrace nmethod for each probe method, installs it, records it in a
// RegisteredProbes bookkeeping object, then hands the whole module to
// the platform layer (pd_activate).  Returns an opaque handle for later
// dispose(), or 0 if JSDT is unsupported; throws RuntimeException on
// code-cache exhaustion or platform failure.
jlong DTraceJSDT::activate(
jint version, jstring module_name, jint providers_count,
JVM_DTraceProvider* providers, TRAPS) {
size_t count = 0;
RegisteredProbes* probes = NULL;
if (!is_supported()) {
return 0;
}
assert(module_name != NULL, "valid module name");
assert(providers != NULL, "valid provider array");
// First pass: total probe count so RegisteredProbes can size its array.
for (int i = 0; i < providers_count; ++i) {
count += providers[i].probe_count;
}
probes = new RegisteredProbes(count);
count = 0;
// Second pass: compile and install a dtrace nmethod per probe method.
for (int i = 0; i < providers_count; ++i) {
assert(providers[i].name != NULL, "valid provider name");
assert(providers[i].probe_count == 0 || providers[i].probes != NULL,
"valid probe count");
for (int j = 0; j < providers[i].probe_count; ++j) {
JVM_DTraceProbe* probe = &(providers[i].probes[j]);
assert(probe != NULL, "valid probe");
assert(probe->method != NULL, "valid method");
assert(probe->name != NULL, "valid probe name");
assert(probe->function != NULL, "valid probe function spec");
methodHandle h_method =
methodHandle(THREAD, Method::resolve_jmethod_id(probe->method));
nmethod* nm = AdapterHandlerLibrary::create_dtrace_nmethod(h_method);
if (nm == NULL) {
// Clean up everything registered so far before throwing.
delete probes;
THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
"Unable to register DTrace probes (CodeCache: no room for DTrace nmethods).");
}
// Pin the method to its dtrace nmethod: never JIT-compile it again.
h_method()->set_not_compilable();
h_method()->set_code(h_method, nm);
probes->nmethod_at_put(count++, nm);
}
}
// Hand the module to the OS-specific DTrace helper.
int handle = pd_activate((void*)probes,
module_name, providers_count, providers);
if (handle < 0) {
delete probes;
THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
"Unable to register DTrace probes (internal error).");
}
probes->set_helper_handle(handle);
return RegisteredProbes::toOpaqueProbes(probes);
}
// Returns whether the DTrace probe behind 'method' is currently armed,
// by inspecting the trap instruction inside its dtrace nmethod.
// NOTE(review): assumes m->code() is non-NULL, i.e. the method was
// registered via activate() and its nmethod is still installed --
// confirm that callers guarantee this.
jboolean DTraceJSDT::is_probe_enabled(jmethodID method) {
Method* m = Method::resolve_jmethod_id(method);
return nativeInstruction_at(m->code()->trap_address())->is_dtrace_trap();
}
// Unregisters a probe module previously returned by activate():
// detaches the DTrace helper, then frees the bookkeeping object
// (whose destructor tears down the probe nmethods).
void DTraceJSDT::dispose(OpaqueProbes probes) {
  // -1 is the "never registered" sentinel.
  if (probes == -1) {
    return;
  }
  RegisteredProbes* rp = RegisteredProbes::toRegisteredProbes(probes);
  if (rp == NULL) {
    return;
  }
  pd_dispose(rp->helper_handle());
  delete rp;
}
// Returns whether JSDT probes are supported on this platform/OS build.
jboolean DTraceJSDT::is_supported() {
return pd_is_supported();
}
#else // HAVE_DTRACE_H
// Stub implementations used when the build has no dtrace.h: JSDT is
// simply reported as unsupported and every operation is a no-op.
jlong DTraceJSDT::activate(
jint version, jstring module_name, jint providers_count,
JVM_DTraceProvider* providers, TRAPS) {
return 0;
}
jboolean DTraceJSDT::is_probe_enabled(jmethodID method) {
return false;
}
void DTraceJSDT::dispose(OpaqueProbes probes) {
return;
}
jboolean DTraceJSDT::is_supported() {
return false;
}
#endif // ndef HAVE_DTRACE_H
C:\hotspot-69087d08d473\src\share\vm/runtime/dtraceJSDT.hpp
#ifndef SHARE_VM_RUNTIME_DTRACEJSDT_HPP
#define SHARE_VM_RUNTIME_DTRACEJSDT_HPP
#include "code/nmethod.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
class RegisteredProbes;
typedef jlong OpaqueProbes;
// All-static facade for Java Statically Defined Tracing (JSDT): the
// JVM_* DTrace entry points funnel through here.  The pd_* functions
// are the platform-specific half, implemented per OS.
class DTraceJSDT : AllStatic {
private:
static int pd_activate(void* moduleBaseAddress, jstring module,
jint providers_count, JVM_DTraceProvider* providers);
static void pd_dispose(int handle);
static jboolean pd_is_supported();
public:
// Registers providers/probes; returns an opaque handle (0 if unsupported).
static OpaqueProbes activate(
jint version, jstring module_name, jint providers_count,
JVM_DTraceProvider* providers, TRAPS);
static jboolean is_probe_enabled(jmethodID method);
static void dispose(OpaqueProbes handle);
static jboolean is_supported();
};
// Bookkeeping for one activated JSDT module: owns the array of dtrace
// probe nmethods plus the DTrace helper handle.  Destruction invalidates
// every probe nmethod and clears it from its method.
class RegisteredProbes : public CHeapObj<mtInternal> {
private:
nmethod** _nmethods; // all the probe methods
size_t _count; // number of probe methods
int _helper_handle; // DTrace-assigned identifier
public:
RegisteredProbes(size_t count) {
_count = count;
_nmethods = NEW_C_HEAP_ARRAY(nmethod*, count, mtInternal);
}
~RegisteredProbes() {
// Tear down each probe: make its nmethod non-entrant and detach it
// from the method so the method can be executed normally again.
for (size_t i = 0; i < _count; ++i) {
_nmethods[i]->make_not_entrant();
_nmethods[i]->method()->clear_code();
}
FREE_C_HEAP_ARRAY(nmethod*, _nmethods, mtInternal);
_nmethods = NULL;
_count = 0;
}
// Opaque-handle conversions: the jlong handed to Java code is just
// the pointer value of this object.
static RegisteredProbes* toRegisteredProbes(OpaqueProbes p) {
return (RegisteredProbes*)(intptr_t)p;
}
static OpaqueProbes toOpaqueProbes(RegisteredProbes* p) {
return (OpaqueProbes)(intptr_t)p;
}
void set_helper_handle(int handle) { _helper_handle = handle; }
int helper_handle() const { return _helper_handle; }
nmethod* nmethod_at(size_t i) {
assert(i >= 0 && i < _count, "bad nmethod index");
return _nmethods[i];
}
void nmethod_at_put(size_t i, nmethod* nm) {
assert(i >= 0 && i < _count, "bad nmethod index");
_nmethods[i] = nm;
}
};
#endif // SHARE_VM_RUNTIME_DTRACEJSDT_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/extendedPC.hpp
#ifndef SHARE_VM_RUNTIME_EXTENDEDPC_HPP
#define SHARE_VM_RUNTIME_EXTENDEDPC_HPP
class ExtendedPC VALUE_OBJ_CLASS_SPEC {
private:
address _pc;
public:
address pc() const { return _pc; }
ExtendedPC(address pc) { _pc = pc; }
ExtendedPC() { _pc = NULL; }
};
#endif // SHARE_VM_RUNTIME_EXTENDEDPC_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/fieldDescriptor.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/annotations.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
// Returns the class loader of the class that holds this field.
oop fieldDescriptor::loader() const {
return _cp->pool_holder()->class_loader();
}
// Returns this field's generic-signature Symbol, or NULL when the field
// carries no generic signature attribute.  The signature is not stored
// in FieldInfo, so we re-walk the holder's field stream to the position
// matching our field index and read it from there.
Symbol* fieldDescriptor::generic_signature() const {
  if (!has_generic_signature()) {
    return NULL;
  }
  int pos = 0;
  for (AllFieldStream fs(field_holder()); !fs.done(); fs.next(), pos++) {
    if (pos == _index) {
      return fs.generic_signature();
    }
  }
  assert(false, "should never happen");
  return vmSymbols::void_signature(); // return a default value (for code analyzers)
}
// Returns the runtime-visible annotations for this field, or NULL when
// the holder class recorded no field annotations at all.
AnnotationArray* fieldDescriptor::annotations() const {
  Array<AnnotationArray*>* md = field_holder()->fields_annotations();
  return (md == NULL) ? NULL : md->at(index());
}
// Returns the type annotations for this field, or NULL when the holder
// class recorded no field type annotations at all.
AnnotationArray* fieldDescriptor::type_annotations() const {
  Array<AnnotationArray*>* type_annos = field_holder()->fields_type_annotations();
  return (type_annos == NULL) ? NULL : type_annos->at(index());
}
// Accessors for the field's ConstantValue attribute: the tag tells
// which typed accessor below is valid for this field.
constantTag fieldDescriptor::initial_value_tag() const {
return constants()->tag_at(initial_value_index());
}
jint fieldDescriptor::int_initial_value() const {
return constants()->int_at(initial_value_index());
}
jlong fieldDescriptor::long_initial_value() const {
return constants()->long_at(initial_value_index());
}
jfloat fieldDescriptor::float_initial_value() const {
return constants()->float_at(initial_value_index());
}
jdouble fieldDescriptor::double_initial_value() const {
return constants()->double_at(initial_value_index());
}
// May resolve (and therefore allocate) the String constant: takes TRAPS.
oop fieldDescriptor::string_initial_value(TRAPS) const {
return constants()->uncached_string_at(initial_value_index(), THREAD);
}
// (Re)binds this descriptor to field 'index' of class 'ik'.  Reuses the
// cached constant-pool handle when we are already bound to the same
// holder, otherwise re-acquires it from the class.
void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
if (_cp.is_null() || field_holder() != ik) {
_cp = constantPoolHandle(Thread::current(), ik->constants());
assert(field_holder() == ik, "must be already initialized to this class");
}
FieldInfo* f = ik->field(index);
assert(!f->is_internal(), "regular Java fields only");
_access_flags = accessFlags_from(f->access_flags());
guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
_index = index;
verify();
}
#ifndef PRODUCT
// Debug-only sanity check: either the descriptor is still unbound
// (default-constructed, _index == badInt) or its index is within the
// holder's Java field count.
void fieldDescriptor::verify() const {
if (_cp.is_null()) {
assert(_index == badInt, "constructor must be called"); // see constructor
} else {
assert(_index >= 0, "good index");
assert(_index < field_holder()->java_fields_count(), "oob");
}
}
// Prints "flags name signature @offset" and, in WizardMode, the
// ConstantValue initializer if the field has one.
// NOTE(review): the long case prints the raw jlong without the type
// label or the closing ")" the other cases emit -- looks like an
// output-format inconsistency; confirm before changing.
void fieldDescriptor::print_on(outputStream* st) const {
access_flags().print_on(st);
name()->print_value_on(st);
st->print(" ");
signature()->print_value_on(st);
st->print(" @%d ", offset());
if (WizardMode && has_initial_value()) {
st->print("(initval ");
constantTag t = initial_value_tag();
if (t.is_int()) {
st->print("int %d)", int_initial_value());
} else if (t.is_long()){
st->print_jlong(long_initial_value());
} else if (t.is_float()){
st->print("float %f)", float_initial_value());
} else if (t.is_double()){
st->print("double %lf)", double_initial_value());
}
}
}
// Prints the descriptor followed by this field's current value in the
// given object.  'as_int' mirrors the value for the trailing hex echo;
// longs/doubles (and 64-bit oops) get a raw two-word hex dump instead.
void fieldDescriptor::print_on_for(outputStream* st, oop obj) {
print_on(st);
BasicType ft = field_type();
jint as_int = 0;
switch (ft) {
case T_BYTE:
as_int = (jint)obj->byte_field(offset());
st->print(" %d", obj->byte_field(offset()));
break;
case T_CHAR:
as_int = (jint)obj->char_field(offset());
{
jchar c = obj->char_field(offset());
as_int = c;
st->print(" %c %d", isprint(c) ? c : ' ', c);
}
break;
case T_DOUBLE:
st->print(" %lf", obj->double_field(offset()));
break;
case T_FLOAT:
// Read the raw bits as int for the hex echo; print as float.
as_int = obj->int_field(offset());
st->print(" %f", obj->float_field(offset()));
break;
case T_INT:
as_int = obj->int_field(offset());
st->print(" %d", obj->int_field(offset()));
break;
case T_LONG:
st->print(" ");
st->print_jlong(obj->long_field(offset()));
break;
case T_SHORT:
as_int = obj->short_field(offset());
st->print(" %d", obj->short_field(offset()));
break;
case T_BOOLEAN:
as_int = obj->bool_field(offset());
st->print(" %s", obj->bool_field(offset()) ? "true" : "false");
break;
case T_ARRAY:
st->print(" ");
// On 32-bit VMs an oop fits in as_int for the hex echo below.
NOT_LP64(as_int = obj->int_field(offset()));
obj->obj_field(offset())->print_value_on(st);
break;
case T_OBJECT:
st->print(" ");
NOT_LP64(as_int = obj->int_field(offset()));
obj->obj_field(offset())->print_value_on(st);
break;
default:
ShouldNotReachHere();
break;
}
// Two-word values (and 64-bit references) are echoed as two hex words;
// other "interesting" values (outside 0..9) as a single hex word.
if (ft == T_LONG || ft == T_DOUBLE LP64_ONLY(|| !is_java_primitive(ft)) ) {
st->print(" (%x %x)", obj->int_field(offset()), obj->int_field(offset()+sizeof(jint)));
} else if (as_int < 0 || as_int > 9) {
st->print(" (%x)", as_int);
}
}
#endif /* PRODUCT */
C:\hotspot-69087d08d473\src\share\vm/runtime/fieldDescriptor.hpp
#ifndef SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
#define SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
#include "oops/constantPool.hpp"
#include "oops/symbol.hpp"
#include "runtime/fieldType.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/constantTag.hpp"
// fieldDescriptor: lightweight value object describing one Java field
// of an InstanceKlass.  Holds only the field index plus a constant-pool
// handle to the holder; everything else is looked up on demand from
// the holder's FieldInfo.
class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
private:
AccessFlags _access_flags;
int _index; // the field index
constantPoolHandle _cp;
// Writes the (possibly JVMTI-modified) access flags back into the
// holder class's FieldInfo so the change is globally visible.
void update_klass_field_access_flag() {
InstanceKlass* ik = field_holder();
ik->field(index())->set_access_flags(_access_flags.as_short());
}
FieldInfo* field() const {
InstanceKlass* ik = field_holder();
return ik->field(_index);
}
public:
fieldDescriptor() {
DEBUG_ONLY(_index = badInt);
}
fieldDescriptor(InstanceKlass* ik, int index) {
DEBUG_ONLY(_index = badInt);
reinitialize(ik, index);
}
Symbol* name() const {
return field()->name(_cp);
}
Symbol* signature() const {
return field()->signature(_cp);
}
InstanceKlass* field_holder() const { return _cp->pool_holder(); }
ConstantPool* constants() const { return _cp(); }
AccessFlags access_flags() const { return _access_flags; }
oop loader() const;
// Offset (in bytes) of field in instance object or class static area.
int offset() const { return field()->offset(); }
Symbol* generic_signature() const;
int index() const { return _index; }
AnnotationArray* annotations() const;
AnnotationArray* type_annotations() const;
// ConstantValue attribute support: initval_index 0 means "none".
bool has_initial_value() const { return field()->initval_index() != 0; }
int initial_value_index() const { return field()->initval_index(); }
constantTag initial_value_tag() const; // The tag will return true on one of is_int(), is_long(), is_single(), is_double()
jint int_initial_value() const;
jlong long_initial_value() const;
jfloat float_initial_value() const;
jdouble double_initial_value() const;
oop string_initial_value(TRAPS) const;
BasicType field_type() const { return FieldType::basic_type(signature()); }
// Access-flag queries.
bool is_public() const { return access_flags().is_public(); }
bool is_private() const { return access_flags().is_private(); }
bool is_protected() const { return access_flags().is_protected(); }
bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
bool is_static() const { return access_flags().is_static(); }
bool is_final() const { return access_flags().is_final(); }
bool is_volatile() const { return access_flags().is_volatile(); }
bool is_transient() const { return access_flags().is_transient(); }
bool is_synthetic() const { return access_flags().is_synthetic(); }
// JVMTI field watch support.
bool is_field_access_watched() const { return access_flags().is_field_access_watched(); }
bool is_field_modification_watched() const
{ return access_flags().is_field_modification_watched(); }
bool has_initialized_final_update() const { return access_flags().has_field_initialized_final_update(); }
bool has_generic_signature() const { return access_flags().field_has_generic_signature(); }
void set_is_field_access_watched(const bool value) {
_access_flags.set_is_field_access_watched(value);
update_klass_field_access_flag();
}
void set_is_field_modification_watched(const bool value) {
_access_flags.set_is_field_modification_watched(value);
update_klass_field_access_flag();
}
void set_has_initialized_final_update(const bool value) {
_access_flags.set_has_field_initialized_final_update(value);
update_klass_field_access_flag();
}
// Binds this descriptor to field 'index' of class 'ik'.
void reinitialize(InstanceKlass* ik, int index);
// Print
void print() { print_on(tty); }
void print_on(outputStream* st) const PRODUCT_RETURN;
void print_on_for(outputStream* st, oop obj) PRODUCT_RETURN;
void verify() const PRODUCT_RETURN;
};
#endif // SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/fieldType.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/oopFactory.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "runtime/fieldType.hpp"
#include "runtime/signature.hpp"
// Skips an optional array-size prefix (a run of decimal digits) in a
// field signature, advancing *index past the digits.  Leaves *index
// unchanged when the current character is not a digit.
void FieldType::skip_optional_size(Symbol* signature, int* index) {
  jchar c = signature->byte_at(*index);
  while (c >= '0' && c <= '9') {
    // BUG FIX: the index must advance with each digit consumed.  The
    // previous code re-read the same byte forever, so any digit in the
    // signature caused an infinite loop.
    *index = *index + 1;
    c = signature->byte_at(*index);
  }
}
// Maps a field signature to its BasicType via its first character
// (e.g. 'I' -> T_INT, 'L' -> T_OBJECT, '[' -> T_ARRAY).
BasicType FieldType::basic_type(Symbol* signature) {
return char2type(signature->byte_at(0));
}
// Validates an array signature of the form "[*X" where X is either a
// primitive type character (must be the final character) or an object
// type "Lclassname;" (must end with ';').  The caller has already
// checked length > 1 and a leading '['.
bool FieldType::is_valid_array_signature(Symbol* sig) {
assert(sig->utf8_length() > 1, "this should already have been checked");
assert(sig->byte_at(0) == '[', "this should already have been checked");
// Skip all the leading '[' dimension markers (at least one remains
// to be the element type, hence "len - 1").
int i = 1;
int len = sig->utf8_length();
while(i < len - 1 && sig->byte_at(i) == '[') i++;
switch(sig->byte_at(i)) {
case 'B': // T_BYTE
case 'C': // T_CHAR
case 'D': // T_DOUBLE
case 'F': // T_FLOAT
case 'I': // T_INT
case 'J': // T_LONG
case 'S': // T_SHORT
case 'Z': // T_BOOLEAN
// Primitive element type must be the last character.
return (i + 1 == len);
case 'L':
// Object element type must be terminated by ';'.
return sig->byte_at(len - 1) == ';';
}
return false;
}
// Decodes an array signature: fills 'fd' with the dimension count and
// (for object element types) the element class name Symbol, and
// returns the element's BasicType.  May create a new Symbol, hence
// TRAPS; CHECK_(T_BYTE) supplies a dummy return value on exception.
BasicType FieldType::get_array_info(Symbol* signature, FieldArrayInfo& fd, TRAPS) {
assert(basic_type(signature) == T_ARRAY, "must be array");
int index = 1;
int dim = 1;
skip_optional_size(signature, &index);
// Count additional dimensions past the leading '['.
while (signature->byte_at(index) == '[') {
index++;
dim++;
skip_optional_size(signature, &index);
}
ResourceMark rm;
char *element = signature->as_C_string() + index;
BasicType element_type = char2type(element[0]);
if (element_type == T_OBJECT) {
int len = (int)strlen(element);
assert(element[len-1] == ';', "last char should be a semicolon");
element[len-1] = '\0'; // chop off semicolon
// element + 1 skips the leading 'L' of "Lclassname;".
fd._object_key = SymbolTable::new_symbol(element + 1, CHECK_(T_BYTE));
}
fd._dimension = dim;
return element_type;
}
C:\hotspot-69087d08d473\src\share\vm/runtime/fieldType.hpp
#ifndef SHARE_VM_RUNTIME_FIELDTYPE_HPP
#define SHARE_VM_RUNTIME_FIELDTYPE_HPP
#include "memory/allocation.hpp"
#include "oops/symbol.hpp"
// FieldArrayInfo: out-parameter for FieldType::get_array_info, holding
// the array dimension and (for object arrays) the element class-name
// Symbol.  Owns one refcount on the Symbol and releases it on scope exit.
class FieldArrayInfo : public StackObj {
friend class FieldType; // field type can set these fields.
int _dimension;
Symbol* _object_key;
public:
int dimension() { return _dimension; }
Symbol* object_key() { return _object_key; }
FieldArrayInfo() : _dimension(0), _object_key(NULL) {}
~FieldArrayInfo() { if (_object_key != NULL) _object_key->decrement_refcount(); }
};
// FieldType: static helpers for parsing JVM field descriptors
// (JVMS 4.3.2), e.g. "I", "Ljava/lang/String;", "[[D".
class FieldType: public AllStatic {
private:
static void skip_optional_size(Symbol* signature, int* index);
static bool is_valid_array_signature(Symbol* signature);
public:
static BasicType basic_type(Symbol* signature);
// Returns true if the signature is a well-formed array descriptor.
static bool is_array(Symbol* signature) { return signature->utf8_length() > 1 && signature->byte_at(0) == '[' && is_valid_array_signature(signature); }
// Returns true if the signature has the object form "L...;".
static bool is_obj(Symbol* signature) {
int sig_length = signature->utf8_length();
return (sig_length >= 2 &&
(signature->byte_at(0) == 'L') &&
(signature->byte_at(sig_length - 1) == ';'));
}
// Decodes dimension and element type of an array signature into 'ai'.
static BasicType get_array_info(Symbol* signature, FieldArrayInfo& ai, TRAPS);
};
#endif // SHARE_VM_RUNTIME_FIELDTYPE_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/fprofiler.cpp
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "code/vtableStubs.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "oops/symbol.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/macros.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Definitions of FlatProfiler's static tick counters and bookkeeping
// (declared in fprofiler.hpp): per-category tick tallies, bytecode
// histograms, the per-thread profilers, the periodic task, and the
// interval-reporting state.
int FlatProfiler::received_gc_ticks = 0;
int FlatProfiler::vm_operation_ticks = 0;
int FlatProfiler::threads_lock_ticks = 0;
int FlatProfiler::class_loader_ticks = 0;
int FlatProfiler::extra_ticks = 0;
int FlatProfiler::blocked_ticks = 0;
int FlatProfiler::deopt_ticks = 0;
int FlatProfiler::unknown_ticks = 0;
int FlatProfiler::interpreter_ticks = 0;
int FlatProfiler::compiler_ticks = 0;
int FlatProfiler::received_ticks = 0;
int FlatProfiler::delivered_ticks = 0;
int* FlatProfiler::bytecode_ticks = NULL;
int* FlatProfiler::bytecode_ticks_stub = NULL;
int FlatProfiler::all_int_ticks = 0;
int FlatProfiler::all_comp_ticks = 0;
int FlatProfiler::all_ticks = 0;
bool FlatProfiler::full_profile_flag = false;
ThreadProfiler* FlatProfiler::thread_profiler = NULL;
ThreadProfiler* FlatProfiler::vm_thread_profiler = NULL;
FlatProfilerTask* FlatProfiler::task = NULL;
elapsedTimer FlatProfiler::timer;
int FlatProfiler::interval_ticks_previous = 0;
IntervalData* FlatProfiler::interval_data = NULL;
// Allocates the bump-pointer arena for ProfilerNodes plus the hash
// table of node chains; the profiler starts disengaged.
ThreadProfiler::ThreadProfiler() {
// Space for the ProfilerNode arena, sized by the ProfilerNodeSize flag (in KB).
const int area_size = 1 * ProfilerNodeSize * 1024;
area_bottom = AllocateHeap(area_size, mtInternal);
area_top = area_bottom;
area_limit = area_bottom + area_size;
table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal);
initialize();
engaged = false;
}
// Releases the node arena and the hash table; nodes themselves live
// inside the arena so a single FreeHeap reclaims them all.
ThreadProfiler::~ThreadProfiler() {
FreeHeap(area_bottom);
area_bottom = NULL;
area_top = NULL;
area_limit = NULL;
FreeHeap(table);
table = NULL;
}
int ThreadProfiler::table_size = 1024;
// Maps an arbitrary hash value onto a slot of the node hash table.
int ThreadProfiler::entry(int value) {
  // BUG FIX: take the magnitude in unsigned arithmetic.  The previous
  // '-value' is undefined behavior when value == INT_MIN (signed
  // overflow); unsigned negation is well defined and yields the same
  // result for every other input.
  unsigned int magnitude = (value > 0) ? (unsigned int)value
                                       : (0u - (unsigned int)value);
  return (int)(magnitude % (unsigned int)table_size);
}
// Scoped region marker: flags the current Java thread's profiler as
// being inside region 'r' so ticks landing there are attributed to it.
// Remembers the profiler so the destructor can clear the same flag.
ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
_r = r;
_pp = NULL;
assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
Thread* tp = Thread::current();
if (tp != NULL && tp->is_Java_thread()) {
JavaThread* jtp = (JavaThread*) tp;
ThreadProfiler* pp = jtp->get_thread_profiler();
_pp = pp;
if (pp != NULL) {
pp->region_flag[r] = true;
}
}
}
// Clears the region flag set by the constructor (if any profiler was
// found) and drops our reference to it.
ThreadProfilerMark::~ThreadProfilerMark() {
  ThreadProfiler* pp = _pp;
  _pp = NULL;
  if (pp != NULL) {
    pp->region_flag[_r] = false;
  }
}
// Fixed column positions used to align the flat-profile report output.
static const int col1 = 2; // position of output column 1
static const int col2 = 11; // position of output column 2
static const int col3 = 25; // position of output column 3
static const int col4 = 55; // position of output column 4
// PCRecorder: static histogram of profiled PCs over the code cache,
// bucketed in fixed 16-byte ranges from the code cache base address.
// Used to report which compiled-code regions accumulate many ticks.
class PCRecorder : AllStatic {
private:
static int* counters;
static address base;
enum {
bucket_size = 16
};
static int index_for(address pc) { return (pc - base)/bucket_size; }
static address pc_for(int index) { return base + (index * bucket_size); }
static int size() {
return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
}
public:
// Returns the start address of the bucket containing pc, or NULL if
// recording was never initialized.
static address bucket_start_for(address pc) {
if (counters == NULL) return NULL;
return pc_for(index_for(pc));
}
static int bucket_count_for(address pc) { return counters[index_for(pc)]; }
static void init();
static void record(address pc);
static void print();
static void print_blobs(CodeBlob* cb);
};
// PCRecorder state: remains NULL until init() runs (ProfilerRecordPC set).
int* PCRecorder::counters = NULL;
address PCRecorder::base = NULL;
// Allocate and zero the per-bucket tick counters and record the code-cache
// base address.  The CodeCache_lock keeps the cache bounds stable while we
// size the array.
void PCRecorder::init() {
  MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  const int bucket_count = size();
  counters = NEW_C_HEAP_ARRAY(int, bucket_count, mtInternal);
  for (int i = 0; i < bucket_count; i++) {
    counters[i] = 0;
  }
  base = CodeCache::first_address();
}
// Count one tick against the bucket containing pc.
void PCRecorder::record(address pc) {
  // Ignore ticks until init() has allocated the bucket array.
  if (counters != NULL) {
    assert(CodeCache::contains(pc), "must be in CodeCache");
    counters[index_for(pc)]++;
  }
}
// Public forwarder; returns NULL when PC recording is not enabled.
address FlatProfiler::bucket_start_for(address pc) {
return PCRecorder::bucket_start_for(pc);
}
// Public forwarder to the PCRecorder bucket counter for pc.
int FlatProfiler::bucket_count_for(address pc) {
return PCRecorder::bucket_count_for(pc);
}
// Report the code blobs whose PC buckets collected more than
// ProfilerPCTickThreshold ticks.  The CodeCache_lock is taken only to size
// the scan; find_blob_unsafe is then used so no lock is held while printing.
void PCRecorder::print() {
if (counters == NULL) return;
tty->cr();
tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
tty->print_cr("===================================================================");
tty->cr();
GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);
int s;
{
// Lock scope: only the size computation needs the cache bounds stable.
MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
s = size();
}
// Collect each blob at most once, even if several of its buckets are hot.
for (int index = 0; index < s; index++) {
int count = counters[index];
if (count > ProfilerPCTickThreshold) {
address pc = pc_for(index);
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
if (cb != NULL && candidates->find(cb) < 0) {
candidates->push(cb);
}
}
}
for (int i = 0; i < candidates->length(); i++) {
print_blobs(candidates->at(i));
}
}
// Print one candidate blob; for nmethods also disassemble the code.
void PCRecorder::print_blobs(CodeBlob* cb) {
  if (cb == NULL) {
    tty->print_cr("stub code");
    return;
  }
  cb->print();
  if (cb->is_nmethod()) {
    ((nmethod*)cb)->print_code();
  }
  tty->cr();
}
// tick_counter accumulates the two tick categories attributed to one
// ProfilerNode: ticks taken while executing the node's own code and ticks
// taken while it was calling into native/runtime code.
class tick_counter {
 public:
  int ticks_in_code;
  int ticks_in_native;
  tick_counter()                      { ticks_in_code = ticks_in_native = 0; }
  tick_counter(int code, int native)  { ticks_in_code = code; ticks_in_native = native; }
  // Combined tick count used for sorting and percentage computation.
  int total() const {
    return (ticks_in_code + ticks_in_native);
  }
  // Fold another counter's ticks into this one (used when summarizing).
  void add(tick_counter* a) {
    ticks_in_code += a->ticks_in_code;
    ticks_in_native += a->ticks_in_native;
  }
  // Bump the category selected by the sampler.
  void update(TickPosition where) {
    switch(where) {
    case tp_code: ticks_in_code++; break;
    case tp_native: ticks_in_native++; break;
    default: break; // no other TickPosition values; keeps -Wswitch quiet
    }
  }
  // Print "percent-of-total  code-ticks" for the report's first columns.
  void print_code(outputStream* st, int total_ticks) {
    st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
  }
  // Print the native-tick column.
  void print_native(outputStream* st) {
    st->print(" + %5d ", ticks_in_native);
  }
};
// ProfilerNode: one entry in a ThreadProfiler's hash table, representing a
// profiled entity (interpreted/compiled method, stub, adapter, ...).  Nodes
// in the same bucket form a singly linked chain via _next.  Storage comes
// from the owning ThreadProfiler's arena (see operator new below).
class ProfilerNode {
private:
ProfilerNode* _next;
public:
tick_counter ticks;
public:
void* operator new(size_t size, ThreadProfiler* tp) throw();
void operator delete(void* p);
ProfilerNode() {
_next = NULL;
}
// Deleting a node deletes the rest of its chain recursively.
// NOTE(review): recursion depth equals the chain length — presumably short
// in practice; confirm before relying on this with very long chains.
virtual ~ProfilerNode() {
if (_next)
delete _next;
}
void set_next(ProfilerNode* n) { _next = n; }
ProfilerNode* next() { return _next; }
void update(TickPosition where) { ticks.update(where);}
int total_ticks() { return ticks.total(); }
// Category predicates: exactly one subclass overrides each to true
// (adapter/runtime-stub/unknown nodes also report is_compiled()).
virtual bool is_interpreted() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_stub() const { return false; }
virtual bool is_runtime_stub() const{ return false; }
virtual void oops_do(OopClosure* f) = 0;
// Match predicates used by the *_update routines to find an existing node;
// each subclass overrides the one relevant to its kind.
virtual bool interpreted_match(Method* m) const { return false; }
virtual bool compiled_match(Method* m ) const { return false; }
virtual bool stub_match(Method* m, const char* name) const { return false; }
virtual bool adapter_match() const { return false; }
virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }
// Shared column headers for the per-category report sections.
static void print_title(outputStream* st) {
st->print(" + native");
st->fill_to(col3);
st->print("Method");
st->fill_to(col4);
st->cr();
}
// Shared "Total ..." summary line for a category.
static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
t->print_code(st, total);
st->fill_to(col2);
t->print_native(st);
st->fill_to(col3);
st->print("%s", msg);
st->cr();
}
virtual Method* method() = 0;
// Print "package.Class.name" (plus signature in Verbose/WizardMode, or a
// basic-type signature for signature-polymorphic intrinsics).
virtual void print_method_on(outputStream* st) {
int limit;
int i;
Method* m = method();
Symbol* k = m->klass_name();
// Print the class name with '/' separators rewritten to '.'.
limit = k->utf8_length();
for (i = 0 ; i < limit ; i += 1) {
char c = (char) k->byte_at(i);
if (c == '/') {
c = '.';
}
st->print("%c", c);
}
if (limit > 0) {
st->print(".");
}
Symbol* n = m->name();
limit = n->utf8_length();
for (i = 0 ; i < limit ; i += 1) {
char c = (char) n->byte_at(i);
st->print("%c", c);
}
if (Verbose || WizardMode) {
Symbol* sig = m->signature();
sig->print_symbol_on(st);
} else if (MethodHandles::is_signature_polymorphic(m->intrinsic_id()))
MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true);
}
// One report row: percentage, code ticks, native ticks, then the name.
virtual void print(outputStream* st, int total_ticks) {
ticks.print_code(st, total_ticks);
st->fill_to(col2);
ticks.print_native(st);
st->fill_to(col3);
print_method_on(st);
st->cr();
}
// Cheap structural hash over method metrics; ThreadProfiler::entry folds
// it into a bucket index.
static int hash(Method* method) {
return (
method->code_size() ^
method->max_stack() ^
method->max_locals() ^
method->size_of_parameters());
}
// qsort-style comparator: descending by total ticks.
static int compare(ProfilerNode** a, ProfilerNode** b) {
return (*b)->total_ticks() - (*a)->total_ticks();
}
};
// Bump-allocate a node from the owning ThreadProfiler's arena.  The bounds
// check happens after the bump, but fatal() does not return, so the
// out-of-range pointer is never handed out.
void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
void* result = (void*) tp->area_top;
tp->area_top += size;
if (tp->area_top > tp->area_limit) {
fatal("flat profiler buffer overflow");
}
return result;
}
// Intentionally a no-op: node storage lives in the ThreadProfiler arena and
// is reclaimed wholesale when the arena is freed; delete only runs dtors.
void ProfilerNode::operator delete(void* p){
}
// interpretedNode: ticks attributed to a method executing in the interpreter.
class interpretedNode : public ProfilerNode {
private:
Method* _method;
oop _class_loader; // needed to keep metadata for the method alive
public:
interpretedNode(Method* method, TickPosition where) : ProfilerNode() {
_method = method;
// Keep the holder's loader alive so _method stays valid across GCs.
_class_loader = method->method_holder()->class_loader();
update(where);
}
bool is_interpreted() const { return true; }
bool interpreted_match(Method* m) const {
return _method == m;
}
void oops_do(OopClosure* f) {
f->do_oop(&_class_loader);
}
Method* method() { return _method; }
static void print_title(outputStream* st) {
st->fill_to(col1);
st->print("%11s", "Interpreted");
ProfilerNode::print_title(st);
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
ProfilerNode::print_method_on(st);
// In Verbose mode also show the invocation counter, when available.
MethodCounters* mcs = method()->method_counters();
if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short();
}
};
// compiledNode: ticks attributed to a method executing as compiled code.
class compiledNode : public ProfilerNode {
private:
Method* _method;
oop _class_loader; // needed to keep metadata for the method alive
public:
compiledNode(Method* method, TickPosition where) : ProfilerNode() {
_method = method;
// Keep the holder's loader alive so _method stays valid across GCs.
_class_loader = method->method_holder()->class_loader();
update(where);
}
bool is_compiled() const { return true; }
bool compiled_match(Method* m) const {
return _method == m;
}
Method* method() { return _method; }
void oops_do(OopClosure* f) {
f->do_oop(&_class_loader);
}
static void print_title(outputStream* st) {
st->fill_to(col1);
st->print("%11s", "Compiled");
ProfilerNode::print_title(st);
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
ProfilerNode::print_method_on(st);
}
};
// stubNode: ticks attributed to a native method's stub, optionally tagged
// with the nearest VM symbol name (when ProfileVM resolves one).
class stubNode : public ProfilerNode {
private:
Method* _method;
oop _class_loader; // needed to keep metadata for the method alive
const char* _symbol; // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
public:
stubNode(Method* method, const char* name, TickPosition where) : ProfilerNode() {
_method = method;
// Keep the holder's loader alive so _method stays valid across GCs.
_class_loader = method->method_holder()->class_loader();
_symbol = name;
update(where);
}
bool is_stub() const { return true; }
void oops_do(OopClosure* f) {
f->do_oop(&_class_loader);
}
// Pointer equality on _symbol is intentional: symbol names are uniqued.
bool stub_match(Method* m, const char* name) const {
return (_method == m) && (_symbol == name);
}
Method* method() { return _method; }
static void print_title(outputStream* st) {
st->fill_to(col1);
st->print("%11s", "Stub");
ProfilerNode::print_title(st);
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
ProfilerNode::print_method_on(st);
print_symbol_on(st);
}
void print_symbol_on(outputStream* st) {
if(_symbol) {
st->print(" (%s)", _symbol);
}
}
};
// adapterNode: single catch-all node for ticks in i2c/c2i adapter code;
// there is no associated Method, so all adapter ticks collapse together.
class adapterNode : public ProfilerNode {
public:
adapterNode(TickPosition where) : ProfilerNode() {
update(where);
}
// Reported under the "Compiled" section of the printout.
bool is_compiled() const { return true; }
bool adapter_match() const { return true; }
Method* method() { return NULL; }
void oops_do(OopClosure* f) {
; // holds no oops
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
st->print("%s", "adapters");
}
};
// runtimeStubNode: ticks attributed to a VM runtime stub (identified by its
// entry point), optionally tagged with the nearest VM symbol (+ProfileVM).
class runtimeStubNode : public ProfilerNode {
private:
const CodeBlob* _stub;
const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
public:
runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub), _symbol(name) {
assert(stub->is_runtime_stub(), "wrong code blob");
update(where);
}
bool is_runtime_stub() const { return true; }
// Match by stub entry point; _symbol compares by pointer (uniqued strings).
bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
assert(stub->is_runtime_stub(), "wrong code blob");
return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
(_symbol == name);
}
Method* method() { return NULL; }
static void print_title(outputStream* st) {
st->fill_to(col1);
st->print("%11s", "Runtime stub");
ProfilerNode::print_title(st);
}
void oops_do(OopClosure* f) {
; // holds no oops
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
st->print("%s", ((RuntimeStub*)_stub)->name());
print_symbol_on(st);
}
void print_symbol_on(outputStream* st) {
if(_symbol) {
st->print(" (%s)", _symbol);
}
}
};
// unknown_compiledNode: ticks in a code blob that is neither an nmethod nor
// a runtime stub (buffer blobs and singleton blobs), identified by name.
class unknown_compiledNode : public ProfilerNode {
const char *_name;
public:
unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
if ( cb->is_buffer_blob() )
_name = ((BufferBlob*)cb)->name();
else
_name = ((SingletonBlob*)cb)->name();
update(where);
}
// Reported under the "Compiled" section of the printout.
bool is_compiled() const { return true; }
// Blob names are compared by content, not pointer, here.
bool unknown_compiled_match(const CodeBlob* cb) const {
if ( cb->is_buffer_blob() )
return !strcmp(((BufferBlob*)cb)->name(), _name);
else
return !strcmp(((SingletonBlob*)cb)->name(), _name);
}
Method* method() { return NULL; }
void oops_do(OopClosure* f) {
; // holds no oops
}
void print(outputStream* st, int total_ticks) {
ProfilerNode::print(st, total_ticks);
}
void print_method_on(outputStream* st) {
st->print("%s", _name);
}
};
// vmNode: ticks attributed to VM-internal (non-Java) code, identified by a
// symbol name resolved via dll lookup (used by the +ProfileVM sampler).
class vmNode : public ProfilerNode {
 private:
  const char* _name; // "optional" name obtained by os means such as dll lookup; may be NULL
 public:
  vmNode(const TickPosition where) : ProfilerNode() {
    _name = NULL;
    update(where);
  }
  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
    _name = name;
    update(where);
  }
  const char *name() const { return _name; }
  // Reported under the "Compiled" section of the printout.
  bool is_compiled() const { return true; }
  // A node built with the nameless constructor has _name == NULL; guard the
  // strcmp (undefined for NULL) so such a node simply matches nothing.
  bool vm_match(const char* name) const {
    return _name != NULL && strcmp(name, _name) == 0;
  }
  Method* method() { return NULL; }
  // Simple shift-xor hash over the symbol's characters; NULL hashes to 0.
  static int hash(const char* name) {
    const char* cp = name;
    int h = 0;
    if (name != NULL) {
      while (*cp != '\0') {
        h = (h << 1) ^ *cp;
        cp++;
      }
    }
    return h;
  }
  void oops_do(OopClosure* f) {
    ; // holds no oops
  }
  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }
  void print_method_on(outputStream* st) {
    if (_name == NULL) {
      st->print("%s", "unknown code");
    } else {
      st->print("%s", _name);
    }
  }
};
// Record an interpreter tick for method: bump the matching node in the hash
// chain, or append a fresh interpretedNode to the bucket.
void ThreadProfiler::interpreted_update(Method* method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  ProfilerNode* head = table[index];
  if (head == NULL) {
    table[index] = new (this) interpretedNode(method, where);
    return;
  }
  ProfilerNode* last = head;
  for (ProfilerNode* cur = head; cur != NULL; cur = cur->next()) {
    if (cur->interpreted_match(method)) {
      cur->update(where);
      return;
    }
    last = cur;
  }
  last->set_next(new (this) interpretedNode(method, where));
}
// Record a compiled-code tick for method: bump the matching node in the
// hash chain, or append a fresh compiledNode to the bucket.
void ThreadProfiler::compiled_update(Method* method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  ProfilerNode* head = table[index];
  if (head == NULL) {
    table[index] = new (this) compiledNode(method, where);
    return;
  }
  ProfilerNode* last = head;
  for (ProfilerNode* cur = head; cur != NULL; cur = cur->next()) {
    if (cur->compiled_match(method)) {
      cur->update(where);
      return;
    }
    last = cur;
  }
  last->set_next(new (this) compiledNode(method, where));
}
// Record a tick in a native method's stub: find-or-append a stubNode keyed
// by (method, symbol name) in the method's hash bucket.
void ThreadProfiler::stub_update(Method* method, const char* name, TickPosition where) {
int index = entry(ProfilerNode::hash(method));
if (!table[index]) {
table[index] = new (this) stubNode(method, name, where);
} else {
ProfilerNode* prev = table[index];
for(ProfilerNode* node = prev; node; node = node->next()) {
if (node->stub_match(method, name)) {
node->update(where);
return;
}
prev = node;
}
prev->set_next(new (this) stubNode(method, name, where));
}
}
// Record a tick in adapter code.  All adapter ticks share one node, kept in
// the fixed bucket 0 (there is no method to hash).
void ThreadProfiler::adapter_update(TickPosition where) {
int index = 0;
if (!table[index]) {
table[index] = new (this) adapterNode(where);
} else {
ProfilerNode* prev = table[index];
for(ProfilerNode* node = prev; node; node = node->next()) {
if (node->adapter_match()) {
node->update(where);
return;
}
prev = node;
}
prev->set_next(new (this) adapterNode(where));
}
}
// Record a tick in a runtime stub: find-or-append a runtimeStubNode keyed by
// the stub's entry point, using the fixed bucket 0 (no method to hash).
void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
int index = 0;
if (!table[index]) {
table[index] = new (this) runtimeStubNode(stub, name, where);
} else {
ProfilerNode* prev = table[index];
for(ProfilerNode* node = prev; node; node = node->next()) {
if (node->runtimeStub_match(stub, name)) {
node->update(where);
return;
}
prev = node;
}
prev->set_next(new (this) runtimeStubNode(stub, name, where));
}
}
// Record a tick in a non-nmethod, non-runtime-stub code blob: find-or-append
// an unknown_compiledNode keyed by blob name, in the fixed bucket 0.
void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
int index = 0;
if (!table[index]) {
table[index] = new (this) unknown_compiledNode(cb, where);
} else {
ProfilerNode* prev = table[index];
for(ProfilerNode* node = prev; node; node = node->next()) {
if (node->unknown_compiled_match(cb)) {
node->update(where);
return;
}
prev = node;
}
prev->set_next(new (this) unknown_compiledNode(cb, where));
}
}
// Record a VM tick whose symbol name could not be resolved.
void ThreadProfiler::vm_update(TickPosition where) {
vm_update("", where);
}
// Record a VM tick against the named symbol: find-or-append a vmNode keyed
// by the name's hash.  The name is strdup'ed only when a new node is made.
// NOTE(review): the (vmNode*) downcast is unchecked; it looks safe only
// because this path is used by the VM-thread profiler, whose table should
// contain nothing but vmNodes — confirm against the +ProfileVM callers.
void ThreadProfiler::vm_update(const char* name, TickPosition where) {
int index = entry(vmNode::hash(name));
assert(index >= 0, "Must be positive");
if (!table[index]) {
table[index] = new (this) vmNode(os::strdup(name), where);
} else {
ProfilerNode* prev = table[index];
for(ProfilerNode* node = prev; node; node = node->next()) {
if (((vmNode *)node)->vm_match(name)) {
node->update(where);
return;
}
prev = node;
}
prev->set_next(new (this) vmNode(os::strdup(name), where));
}
}
// Periodic sampling task run by the WatcherThread; task() takes one profile
// sample per interval_time milliseconds.
class FlatProfilerTask : public PeriodicTask {
public:
FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
void task();
};
// Attribute a tick taken while a VM operation is in progress.
// Classification priority: GC, then deoptimization, then generic VM op.
void FlatProfiler::record_vm_operation() {
  if (Universe::heap()->is_gc_active()) {
    FlatProfiler::received_gc_ticks += 1;
  } else if (DeoptimizationMarker::is_active()) {
    FlatProfiler::deopt_ticks += 1;
  } else {
    FlatProfiler::vm_operation_ticks += 1;
  }
}
// Sample the VM thread (+ProfileVM only): grab its PC, resolve the nearest
// symbol via dll lookup, and record the tick under that symbol's vmNode.
void FlatProfiler::record_vm_tick() {
if( ProfileVM ) {
ResourceMark rm;
ExtendedPC epc;
const char *name = NULL;
char buf[256];
buf[0] = '\0';
vm_thread_profiler->inc_thread_ticks();
epc = os::get_thread_pc(VMThread::vm_thread());
if(epc.pc() != NULL) {
if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
name = buf;
}
}
// Ticks with no resolvable symbol are silently dropped here.
if (name != NULL) {
vm_thread_profiler->vm_update(name, tp_native);
}
}
}
// One sampling pass over all Java threads: under Threads_lock, mark every
// engaged, non-suspended thread externally suspended; then (lock released)
// suspend each, record its tick, and resume it.  If the Threads_lock cannot
// be acquired without blocking, the whole sample is skipped and counted.
void FlatProfiler::record_thread_ticks() {
int maxthreads, suspendedthreadcount;
JavaThread** threadsList;
bool interval_expired = false;
// Check whether an interval report is due on this pass.
if (ProfileIntervals &&
(FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
interval_expired = true;
interval_ticks_previous = FlatProfiler::received_ticks;
}
// try_lock: never block the WatcherThread on the Threads_lock.
if (Threads_lock->try_lock()) {
{ // Threads_lock scope
maxthreads = Threads::number_of_threads();
threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal);
suspendedthreadcount = 0;
for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
// Compiler threads actively compiling are counted, not sampled.
if (tp->is_Compiler_thread()) {
CompilerThread* cthread = (CompilerThread*)tp;
if (cthread->task() != NULL) {
FlatProfiler::compiler_ticks += 1;
continue;
}
}
ThreadProfiler* pp = tp->get_thread_profiler();
if (pp != NULL && pp->engaged) {
MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
// Skip threads already suspended by someone else, or exiting.
if (!tp->is_external_suspend() && !tp->is_exiting()) {
tp->set_external_suspend();
threadsList[suspendedthreadcount++] = tp;
}
}
}
Threads_lock->unlock();
}
// Actually suspend the marked threads (outside the Threads_lock).
for (int j = 0; j < suspendedthreadcount; j++) {
JavaThread *tp = threadsList[j];
if (tp) {
tp->java_suspend();
}
}
// Record one tick per suspended thread, then resume it.
for (int i = 0; i < suspendedthreadcount; i++) {
JavaThread *tp = threadsList[i];
if (tp) {
ThreadProfiler* pp = tp->get_thread_profiler();
if (pp != NULL && pp->engaged) {
HandleMark hm;
FlatProfiler::delivered_ticks += 1;
if (interval_expired) {
FlatProfiler::interval_record_thread(pp);
}
// Threads blocked on compilation count as compiler time.
if (tp->blocked_on_compilation()) {
pp->compiler_ticks += 1;
pp->interval_data_ref()->inc_compiling();
} else {
pp->record_tick(tp);
}
}
// java_resume requires the Threads_lock.
MutexLocker ml(Threads_lock);
tp->java_resume();
}
}
if (interval_expired) {
FlatProfiler::interval_print();
FlatProfiler::interval_reset();
}
FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
} else {
// Could not sample this time; remember how often that happens.
FlatProfiler::threads_lock_ticks += 1;
}
}
// Periodic entry point (WatcherThread): count the tick, sample the VM
// thread if requested, classify any in-flight VM operation, and — unless
// the VM is at a safepoint — sample the Java threads.
void FlatProfilerTask::task() {
FlatProfiler::received_ticks += 1;
if (ProfileVM) {
FlatProfiler::record_vm_tick();
}
VM_Operation* op = VMThread::vm_operation();
if (op != NULL) {
FlatProfiler::record_vm_operation();
// At a safepoint Java threads are stopped anyway; skip thread sampling.
if (SafepointSynchronize::is_at_safepoint()) {
return;
}
}
FlatProfiler::record_thread_ticks();
}
// Attribute a tick taken in an interpreted frame: update the method's node
// and, when the PC lands in a bytecode codelet, the per-bytecode histogram
// (ticks points at either bytecode_ticks or bytecode_ticks_stub).
void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
FlatProfiler::all_int_ticks++;
if (!FlatProfiler::full_profile()) {
return;
}
// The frame may be in a transient state; count it as generic interpreter time.
if (!fr.is_interpreted_frame_valid(thread)) {
interpreter_ticks += 1;
FlatProfiler::interpreter_ticks += 1;
return;
}
Method* method = *fr.interpreter_frame_method_addr();
interpreted_update(method, where);
// Bump the histogram bucket for the bytecode being interpreted, if known.
InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
if (desc != NULL && desc->bytecode() >= 0) {
ticks[desc->bytecode()]++;
}
}
// Attribute a tick taken in compiled code.  A runtime-stub frame is charged
// to its sender as native time; otherwise the blob decides the category:
// nmethod -> compiled/stub node, runtime stub -> runtimeStubNode, anything
// else -> unknown_compiledNode.
void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
const char *name = NULL;
TickPosition localwhere = where;
FlatProfiler::all_comp_ticks++;
if (!FlatProfiler::full_profile()) return;
CodeBlob* cb = fr.cb();
// A runtime stub is VM glue: step to the sender and record as native time.
if (cb->is_runtime_stub()) {
RegisterMap map(thread, false);
fr = fr.sender(&map);
cb = fr.cb();
localwhere = tp_native;
}
Method* method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
(Method*)NULL;
if (method == NULL) {
if (cb->is_runtime_stub())
runtime_stub_update(cb, name, localwhere);
else
unknown_compiled_update(cb, localwhere);
}
else {
// Native wrappers are reported as stubs, real nmethods as compiled.
if (method->is_native()) {
stub_update(method, name, localwhere);
} else {
compiled_update(method, localwhere);
}
}
}
extern "C" void find(int x);
// Attribute a tick for a thread caught running Java code: classify the top
// frame directly, or walk to the nearest Java sender and charge the tick to
// it as calling-frame (native) time.
void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
// The tick happened in real code -> non native code
if (fr.is_interpreted_frame()) {
interval_data_ref()->inc_interpreted();
record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
return;
}
if (CodeCache::contains(fr.pc())) {
interval_data_ref()->inc_compiled();
PCRecorder::record(fr.pc());
record_compiled_tick(thread, fr, tp_code);
return;
}
if (VtableStubs::stub_containing(fr.pc()) != NULL) {
unknown_ticks_array[ut_vtable_stubs] += 1;
return;
}
// Not in recognizable code: charge the nearest Java sender frame instead.
frame caller = fr.profile_find_Java_sender_frame(thread);
if (caller.sp() != NULL && caller.pc() != NULL) {
record_tick_for_calling_frame(thread, caller);
return;
}
unknown_ticks_array[ut_running_frame] += 1;
FlatProfiler::unknown_ticks += 1;
}
// Attribute a tick for a thread that is in native/VM code: the Java frame
// fr (or its nearest Java sender) gets charged native time.
void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
interval_data_ref()->inc_native();
if (fr.is_interpreted_frame()) {
record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
return;
}
if (CodeCache::contains(fr.pc())) {
record_compiled_tick(thread, fr, tp_native);
return;
}
// Keep walking toward a recognizable Java frame.
frame caller = fr.profile_find_Java_sender_frame(thread);
if (caller.sp() != NULL && caller.pc() != NULL) {
record_tick_for_calling_frame(thread, caller);
return;
}
unknown_ticks_array[ut_calling_frame] += 1;
FlatProfiler::unknown_ticks += 1;
}
// Record one sample for an (externally suspended) thread.  Region flags set
// by ThreadProfilerMark take priority; otherwise the thread's state decides
// whether its last Java frame is charged as running or as calling code.
void ThreadProfiler::record_tick(JavaThread* thread) {
FlatProfiler::all_ticks++;
thread_ticks += 1;
// Region ticks (class loading etc.) trump frame-based attribution.
if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
class_loader_ticks += 1;
FlatProfiler::class_loader_ticks += 1;
return;
} else if (region_flag[ThreadProfilerMark::extraRegion]) {
extra_ticks += 1;
FlatProfiler::extra_ticks += 1;
return;
}
// Wait for the external suspend to complete before touching the stack.
uint32_t debug_bits = 0;
if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
SuspendRetryDelay, &debug_bits)) {
unknown_ticks_array[ut_unknown_thread_state] += 1;
FlatProfiler::unknown_ticks += 1;
return;
}
frame fr;
switch (thread->thread_state()) {
// In native/VM code: the last Java frame is a caller into that code.
case _thread_in_native:
case _thread_in_native_trans:
case _thread_in_vm:
case _thread_in_vm_trans:
if (thread->profile_last_Java_frame(&fr)) {
if (fr.is_runtime_frame()) {
RegisterMap map(thread, false);
fr = fr.sender(&map);
}
record_tick_for_calling_frame(thread, fr);
} else {
unknown_ticks_array[ut_no_last_Java_frame] += 1;
FlatProfiler::unknown_ticks += 1;
}
break;
// Executing Java code: charge the frame as running code.
case _thread_in_Java:
case _thread_in_Java_trans:
if (thread->profile_last_Java_frame(&fr)) {
if (fr.is_safepoint_blob_frame()) {
RegisterMap map(thread, false);
fr = fr.sender(&map);
}
record_tick_for_running_frame(thread, fr);
} else {
unknown_ticks_array[ut_no_last_Java_frame] += 1;
FlatProfiler::unknown_ticks += 1;
}
break;
// Blocked: only count as running/calling if the OS thread is runnable,
// otherwise it is genuinely blocked time.
case _thread_blocked:
case _thread_blocked_trans:
if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
if (thread->profile_last_Java_frame(&fr)) {
if (fr.is_safepoint_blob_frame()) {
RegisterMap map(thread, false);
fr = fr.sender(&map);
record_tick_for_running_frame(thread, fr);
} else {
record_tick_for_calling_frame(thread, fr);
}
} else {
unknown_ticks_array[ut_no_last_Java_frame] += 1;
FlatProfiler::unknown_ticks += 1;
}
} else {
blocked_ticks += 1;
FlatProfiler::blocked_ticks += 1;
}
break;
// Threads not yet running Java code have no frame to charge.
case _thread_uninitialized:
case _thread_new:
case _thread_new_trans:
unknown_ticks_array[ut_no_last_Java_frame] += 1;
FlatProfiler::unknown_ticks += 1;
break;
default:
unknown_ticks_array[ut_unknown_thread_state] += 1;
FlatProfiler::unknown_ticks += 1;
break;
}
return;
}
// Start sampling this thread and its elapsed-time clock.
void ThreadProfiler::engage() {
engaged = true;
timer.start();
}
// Stop sampling this thread and pause its elapsed-time clock.
void ThreadProfiler::disengage() {
engaged = false;
timer.stop();
}
// Reset all per-thread profiling state: empty the node table, zero every
// counter and region flag, restart the timer, and clear the interval data.
void ThreadProfiler::initialize() {
for (int index = 0; index < table_size; index++) {
table[index] = NULL;
}
thread_ticks = 0;
blocked_ticks = 0;
compiler_ticks = 0;
interpreter_ticks = 0;
for (int ut = 0; ut < ut_end; ut += 1) {
unknown_ticks_array[ut] = 0;
}
region_flag[ThreadProfilerMark::classLoaderRegion] = false;
class_loader_ticks = 0;
region_flag[ThreadProfilerMark::extraRegion] = false;
extra_ticks = 0;
timer.start();
interval_data_ref()->reset();
}
// Discard all collected data and reinitialize.  Deleting a bucket head
// destroys its whole chain (see ~ProfilerNode); node storage itself stays
// in the arena (operator delete is a no-op) and is simply reused.
void ThreadProfiler::reset() {
timer.stop();
if (table != NULL) {
for (int index = 0; index < table_size; index++) {
ProfilerNode* n = table[index];
if (n != NULL) {
delete n;
}
}
}
initialize();
}
// One-time allocation of the global tables: the per-bytecode histograms,
// the optional PC recorder, and the interval-report slots.
void FlatProfiler::allocate_table() {
{ // Bytecode table
bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
for(int index = 0; index < Bytecodes::number_of_codes; index++) {
bytecode_ticks[index] = 0;
bytecode_ticks_stub[index] = 0;
}
}
if (ProfilerRecordPC) PCRecorder::init();
interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal);
FlatProfiler::interval_reset();
}
// Start the flat profiler: allocate tables on first use, create the VM
// thread profiler if +ProfileVM, enroll the periodic sampling task, and
// attach/engage a profiler on mainThread (if given).
void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
full_profile_flag = fullProfile;
if (bytecode_ticks == NULL) {
allocate_table();
}
if(ProfileVM && (vm_thread_profiler == NULL)){
vm_thread_profiler = new ThreadProfiler();
}
if (task == NULL) {
task = new FlatProfilerTask(WatcherThread::delay_interval);
task->enroll();
}
timer.start();
if (mainThread != NULL) {
// Reuse (after reset) or create the thread's profiler, then engage it.
ThreadProfiler* pp = mainThread->get_thread_profiler();
if (pp == NULL) {
mainThread->set_thread_profiler(new ThreadProfiler());
} else {
pp->reset();
}
mainThread->get_thread_profiler()->engage();
}
// Per-thread profiling mode: no single shared profiler.
thread_profiler = NULL;
}
// Stop the flat profiler: cancel the periodic task, then disengage either
// the shared profiler or every thread's profiler (under Threads_lock).
void FlatProfiler::disengage() {
if (!task) {
return;
}
timer.stop();
task->disenroll();
delete task;
task = NULL;
if (thread_profiler != NULL) {
thread_profiler->disengage();
} else {
MutexLocker tl(Threads_lock);
for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
ThreadProfiler* pp = tp->get_thread_profiler();
if (pp != NULL) {
pp->disengage();
}
}
}
}
// Stop profiling (if running) and zero all global tick counters.
void FlatProfiler::reset() {
if (task) {
disengage();
}
class_loader_ticks = 0;
extra_ticks = 0;
received_gc_ticks = 0;
vm_operation_ticks = 0;
compiler_ticks = 0;
deopt_ticks = 0;
interpreter_ticks = 0;
blocked_ticks = 0;
unknown_ticks = 0;
received_ticks = 0;
delivered_ticks = 0;
timer.stop();
}
// The profiler is active exactly while its periodic task is enrolled.
bool FlatProfiler::is_active() {
return task != NULL;
}
// Print, for each bytecode with any ticks, how often it was sampled while
// being interpreted (first column) and while in interpreter stub code
// (second column).  (A GrowableArray local that was allocated here but
// never used has been removed.)
void FlatProfiler::print_byte_code_statistics() {
  tty->print_cr(" Bytecode ticks:");
  for (int index = 0; index < Bytecodes::number_of_codes; index++) {
    if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
      tty->print_cr("  %4d %4d = %s",
        FlatProfiler::bytecode_ticks[index],
        FlatProfiler::bytecode_ticks_stub[index],
        Bytecodes::name( (Bytecodes::Code) index));
    }
  }
  tty->cr();
}
// Print one "percent  count  title" summary row; rows with no ticks are
// suppressed entirely.
void print_ticks(const char* title, int ticks, int total) {
  if (ticks <= 0) {
    return;
  }
  tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
  tty->fill_to(col3);
  tty->print("%s", title);
  tty->cr();
}
// Print this thread's profile: all nodes sorted by descending tick count,
// grouped into interpreted / compiled / stub / runtime-stub sections, then
// the thread-local tick summary.  Printing consumes the data (reset()).
void ThreadProfiler::print(const char* thread_name) {
ResourceMark rm;
MutexLocker ppl(ProfilePrint_lock);
int index = 0; // Declared outside for loops for portability
if (table == NULL) {
return;
}
if (thread_ticks <= 0) {
return;
}
const char* title = "too soon to tell";
double secs = timer.seconds();
// Flatten the hash table into one array and sort by descending total ticks.
GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
for(index = 0; index < table_size; index++) {
for(ProfilerNode* node = table[index]; node; node = node->next())
array->append(node);
}
array->sort(&ProfilerNode::compare);
// "active" excludes blocked time and is the base for all percentages.
int active =
class_loader_ticks +
compiler_ticks +
interpreter_ticks +
unknown_ticks();
for (index = 0; index < array->length(); index++) {
active += array->at(index)->ticks.total();
}
int total = active + blocked_ticks;
tty->cr();
tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
if (total != thread_ticks) {
print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
}
tty->cr();
// Interpreted methods (up to ProfilerNumberOfInterpretedMethods rows).
tick_counter interpreted_ticks;
bool has_interpreted_ticks = false;
int print_count = 0;
for (index = 0; index < array->length(); index++) {
ProfilerNode* n = array->at(index);
if (n->is_interpreted()) {
interpreted_ticks.add(&n->ticks);
if (!has_interpreted_ticks) {
interpretedNode::print_title(tty);
has_interpreted_ticks = true;
}
if (print_count++ < ProfilerNumberOfInterpretedMethods) {
n->print(tty, active);
}
}
}
if (has_interpreted_ticks) {
if (print_count <= ProfilerNumberOfInterpretedMethods) {
title = "Total interpreted";
} else {
title = "Total interpreted (including elided)";
}
interpretedNode::print_total(tty, &interpreted_ticks, active, title);
tty->cr();
}
// Compiled methods — also covers adapter, unknown-blob, and vm nodes,
// which all report is_compiled() (up to ProfilerNumberOfCompiledMethods rows).
tick_counter compiled_ticks;
bool has_compiled_ticks = false;
print_count = 0;
for (index = 0; index < array->length(); index++) {
ProfilerNode* n = array->at(index);
if (n->is_compiled()) {
compiled_ticks.add(&n->ticks);
if (!has_compiled_ticks) {
compiledNode::print_title(tty);
has_compiled_ticks = true;
}
if (print_count++ < ProfilerNumberOfCompiledMethods) {
n->print(tty, active);
}
}
}
if (has_compiled_ticks) {
if (print_count <= ProfilerNumberOfCompiledMethods) {
title = "Total compiled";
} else {
title = "Total compiled (including elided)";
}
compiledNode::print_total(tty, &compiled_ticks, active, title);
tty->cr();
}
// Native-method stubs (up to ProfilerNumberOfStubMethods rows).
tick_counter stub_ticks;
bool has_stub_ticks = false;
print_count = 0;
for (index = 0; index < array->length(); index++) {
ProfilerNode* n = array->at(index);
if (n->is_stub()) {
stub_ticks.add(&n->ticks);
if (!has_stub_ticks) {
stubNode::print_title(tty);
has_stub_ticks = true;
}
if (print_count++ < ProfilerNumberOfStubMethods) {
n->print(tty, active);
}
}
}
if (has_stub_ticks) {
if (print_count <= ProfilerNumberOfStubMethods) {
title = "Total stub";
} else {
title = "Total stub (including elided)";
}
stubNode::print_total(tty, &stub_ticks, active, title);
tty->cr();
}
// Runtime stubs (up to ProfilerNumberOfRuntimeStubNodes rows).
tick_counter runtime_stub_ticks;
bool has_runtime_stub_ticks = false;
print_count = 0;
for (index = 0; index < array->length(); index++) {
ProfilerNode* n = array->at(index);
if (n->is_runtime_stub()) {
runtime_stub_ticks.add(&n->ticks);
if (!has_runtime_stub_ticks) {
runtimeStubNode::print_title(tty);
has_runtime_stub_ticks = true;
}
if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
n->print(tty, active);
}
}
}
if (has_runtime_stub_ticks) {
if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
title = "Total runtime stubs";
} else {
title = "Total runtime stubs (including elided)";
}
runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
tty->cr();
}
// Ticks not attributable to any method node.
if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
tty->fill_to(col1);
tty->print_cr("Thread-local ticks:");
print_ticks("Blocked (of total)", blocked_ticks, total);
print_ticks("Class loader", class_loader_ticks, active);
print_ticks("Extra", extra_ticks, active);
print_ticks("Interpreter", interpreter_ticks, active);
print_ticks("Compilation", compiler_ticks, active);
print_ticks("Unknown: vtable stubs", unknown_ticks_array[ut_vtable_stubs], active);
print_ticks("Unknown: null method", unknown_ticks_array[ut_null_method], active);
print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame], active);
print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame], active);
print_ticks("Unknown: no pc", unknown_ticks_array[ut_no_pc], active);
print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame], active);
print_ticks("Unknown: thread_state", unknown_ticks_array[ut_unknown_thread_state], active);
tty->cr();
}
if (WizardMode) {
tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024);
}
// Printing consumes the data: clear counters and the node table.
reset();
}
// Disabled debugging helper retained from the original sources.  The block
// below was commented out, but the opening comment token was lost in this
// copy, leaving an unbalanced close token and a definition with no return
// type; restore the comment so the file parses again.
/*
ThreadProfiler::print_unknown(){
if (table == NULL) {
return;
}
if (thread_ticks <= 0) {
return;
}
} */
// Print the complete flat-profiler report: every thread's profile, the
// optional bytecode histogram, the global tick summary, hot PC buckets, and
// (with +ProfileVM) the VM thread's profile.
void FlatProfiler::print(int unused) {
ResourceMark rm;
if (thread_profiler != NULL) {
thread_profiler->print("All threads");
} else {
// Per-thread mode: print each Java thread's profiler under Threads_lock.
MutexLocker tl(Threads_lock);
for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
ThreadProfiler* pp = tp->get_thread_profiler();
if (pp != NULL) {
pp->print(tp->get_thread_name());
}
}
}
if (ProfilerPrintByteCodeStatistics) {
print_byte_code_statistics();
}
// Global summary: every category is reported relative to received_ticks.
if (non_method_ticks() > 0) {
tty->cr();
tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
print_ticks("Received ticks", received_ticks, received_ticks);
print_ticks("Received GC ticks", received_gc_ticks, received_ticks);
print_ticks("Compilation", compiler_ticks, received_ticks);
print_ticks("Deoptimization", deopt_ticks, received_ticks);
print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
#ifndef PRODUCT
print_ticks("Blocked ticks", blocked_ticks, received_ticks);
print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
print_ticks("Delivered ticks", delivered_ticks, received_ticks);
print_ticks("All ticks", all_ticks, received_ticks);
#endif
print_ticks("Class loader", class_loader_ticks, received_ticks);
print_ticks("Extra ", extra_ticks, received_ticks);
print_ticks("Interpreter", interpreter_ticks, received_ticks);
print_ticks("Unknown code", unknown_ticks, received_ticks);
}
PCRecorder::print();
if(ProfileVM){
tty->cr();
vm_thread_profiler->print("VM Thread");
}
}
// Print the column legend for interval data:
// interpreted / compiled / native / compiling.
void IntervalData::print_header(outputStream* st) {
  static const char* const header = "i/c/n/g";
  st->print("%s", header);
}
// Print the four tick counters in the same order as the header line.
void IntervalData::print_data(outputStream* st) {
  const int i_ticks = interpreted();
  const int c_ticks = compiled();
  const int n_ticks = native();
  const int g_ticks = compiling();
  st->print("%d/%d/%d/%d", i_ticks, c_ticks, n_ticks, g_ticks);
}
// Snapshot and clear the thread's per-interval counters, then insert the
// snapshot into the top-N table, which is kept sorted by descending total.
void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
  IntervalData snapshot = tp->interval_data();
  const int snapshot_total = snapshot.total();
  tp->interval_data_ref()->reset();
  int slot = 0;
  while (slot < interval_print_size) {
    if (snapshot_total > interval_data[slot].total()) {
      // Shift smaller entries down one position to make room.
      for (int k = interval_print_size - 1; k > slot; k--) {
        interval_data[k] = interval_data[k-1];
      }
      interval_data[slot] = snapshot;
      break;
    }
    slot++;
  }
}
// Print one line of interval data: a timestamp, the column legend, and the
// data of every non-empty slot in the top-N table.
void FlatProfiler::interval_print() {
  // The table is sorted by total, so an empty first slot means no data at all.
  if (interval_data[0].total() <= 0) {
    return;
  }
  tty->stamp();
  tty->print("\t");
  IntervalData::print_header(tty);
  for (int slot = 0; slot < interval_print_size; slot++) {
    if (interval_data[slot].total() > 0) {
      tty->print("\t");
      interval_data[slot].print_data(tty);
    }
  }
  tty->cr();
}
// Clear every slot of the per-interval top-N table.
void FlatProfiler::interval_reset() {
  for (int slot = interval_print_size; slot-- > 0; ) {
    interval_data[slot].reset();
  }
}
// GC support: visit the oops held by every profiler node in the hash table.
void ThreadProfiler::oops_do(OopClosure* f) {
  // A NULL table means this profiler never recorded anything.
  if (table == NULL) {
    return;
  }
  for (int bucket = 0; bucket < table_size; bucket++) {
    ProfilerNode* node = table[bucket];
    while (node != NULL) {
      node->oops_do(f);
      node = node->next();
    }
  }
}
// GC support: a single shared profiler, if present, is visited directly;
// otherwise visit each Java thread's private profiler.
void FlatProfiler::oops_do(OopClosure* f) {
  if (thread_profiler != NULL) {
    thread_profiler->oops_do(f);
    return;
  }
  for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
    ThreadProfiler* prof = t->get_thread_profiler();
    if (prof != NULL) {
      prof->oops_do(f);
    }
  }
}
// File boundary: C:\hotspot-69087d08d473\src\share\vm/runtime/fprofiler.hpp
#ifndef SHARE_VM_RUNTIME_FPROFILER_HPP
#define SHARE_VM_RUNTIME_FPROFILER_HPP
#include "utilities/macros.hpp"
#include "runtime/timer.hpp"
class ThreadProfiler;
class ThreadProfilerMark;
class FlatProfiler;
class IntervalData;
class ProfilerNode;
class FlatProfilerTask;
// Where a profiling tick was attributed: in generated/Java code or in
// native code.
enum TickPosition {
tp_code,
tp_native
};
// Stack-allocated marker: while one is live, profiling ticks taken by the
// owning thread are attributed to the given region (e.g. class loading).
class ThreadProfilerMark: public StackObj {
public:
// Regions a tick can be charged to; maxRegion bounds per-region arrays.
enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
ThreadProfilerMark(Region) NOT_FPROF_RETURN;
~ThreadProfilerMark() NOT_FPROF_RETURN;
private:
ThreadProfiler* _pp; // profiler of the thread that created this mark
Region _r; // region being marked
};
#if INCLUDE_FPROF
// Per-interval tick counters for one thread: how many profiling ticks hit
// interpreted code, compiled code, native code, and the compiler itself.
class IntervalData VALUE_OBJ_CLASS_SPEC {
private:
int _interpreted;
int _compiled;
int _native;
int _compiling;
public:
// Accessors, one per counter.
int interpreted() { return _interpreted; }
int compiled() { return _compiled; }
int native() { return _native; }
int compiling() { return _compiling; }
// Sum of all four counters.
int total() {
return interpreted() + compiled() + native() + compiling();
}
// Incrementers, one per counter.
void inc_interpreted() { ++_interpreted; }
void inc_compiled() { ++_compiled; }
void inc_native() { ++_native; }
void inc_compiling() { ++_compiling; }
// Zero all counters.
void reset() {
_interpreted = 0;
_compiled = 0;
_native = 0;
_compiling = 0;
}
static void print_header(outputStream* st);
void print_data(outputStream* st);
};
#endif // INCLUDE_FPROF
// Per-thread flat profiler: accumulates tick counts per method/stub in a
// hash table whose nodes are carved from a preallocated area.
class ThreadProfiler: public CHeapObj<mtInternal> {
public:
ThreadProfiler() NOT_FPROF_RETURN;
~ThreadProfiler() NOT_FPROF_RETURN;
// Discard all accumulated data.
void reset() NOT_FPROF_RETURN;
// Start/stop collecting ticks for this thread.
void engage() NOT_FPROF_RETURN;
void disengage() NOT_FPROF_RETURN;
// Print this thread's profile under the given heading.
void print(const char* thread_name) NOT_FPROF_RETURN;
// GC support: visit oops held by profiler nodes.
void oops_do(OopClosure* f) NOT_FPROF_RETURN;
#if INCLUDE_FPROF
private:
friend class ProfilerNode;
char* area_bottom; // preallocated area for pnodes
char* area_top;
char* area_limit;
static int table_size;
ProfilerNode** table;
private:
// Tick-recording helpers, keyed by where the thread was caught executing.
void record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks);
void record_compiled_tick (JavaThread* thread, frame fr, TickPosition where);
void interpreted_update(Method* method, TickPosition where);
void compiled_update (Method* method, TickPosition where);
void stub_update (Method* method, const char* name, TickPosition where);
void adapter_update (TickPosition where);
void runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where);
void unknown_compiled_update (const CodeBlob* cb, TickPosition where);
void vm_update (TickPosition where);
void vm_update (const char* name, TickPosition where);
void record_tick_for_running_frame(JavaThread* thread, frame fr);
void record_tick_for_calling_frame(JavaThread* thread, frame fr);
void initialize();
// Map a value to a bucket index of the hash table.
static int entry(int value);
private:
friend class FlatProfiler;
void record_tick(JavaThread* thread);
bool engaged;
int thread_ticks;
int compiler_ticks;
int interpreter_ticks;
public:
void inc_thread_ticks() { thread_ticks += 1; }
private:
friend class ThreadProfilerMark;
// Region marks currently active for this thread (see ThreadProfilerMark).
bool region_flag[ThreadProfilerMark::maxRegion];
int class_loader_ticks;
int extra_ticks;
private:
int blocked_ticks;
// Buckets for ticks that could not be attributed to any code.
enum UnknownTickSites {
ut_null_method,
ut_vtable_stubs,
ut_running_frame,
ut_calling_frame,
ut_no_pc,
ut_no_last_Java_frame,
ut_unknown_thread_state,
ut_end
};
int unknown_ticks_array[ut_end];
// Total of all unknown-tick buckets.
int unknown_ticks() {
int result = 0;
for (int ut = 0; ut < ut_end; ut += 1) {
result += unknown_ticks_array[ut];
}
return result;
}
elapsedTimer timer;
private:
// Data for the periodic interval report (see FlatProfiler).
IntervalData _interval_data;
IntervalData interval_data() {
return _interval_data;
}
IntervalData* interval_data_ref() {
return &_interval_data;
}
#endif // INCLUDE_FPROF
};
// Global (AllStatic) coordinator of the flat profiler: owns the periodic
// tick task, the global tick counters, and the interval report data.
class FlatProfiler: AllStatic {
public:
static void reset() NOT_FPROF_RETURN ;
static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
static void disengage() NOT_FPROF_RETURN ;
static void print(int unused) NOT_FPROF_RETURN ;
static bool is_active() NOT_FPROF_RETURN_(false) ;
static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);
// GC support: visit oops held by all profilers.
static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;
static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);
enum { MillisecsPerTick = 10 }; // ms per profiling ticks
static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);
#if INCLUDE_FPROF
private:
static bool full_profile() {
return full_profile_flag;
}
friend class ThreadProfiler;
// Global tick counters, bumped by the tick task.
static int received_gc_ticks; // ticks during which gc was active
static int vm_operation_ticks; // total ticks in vm_operations other than GC
static int threads_lock_ticks; // the number of times we couldn't get the Threads_lock without blocking
static int blocked_ticks; // ticks when the thread was blocked.
static int class_loader_ticks; // total ticks in class loader
static int extra_ticks; // total ticks an extra temporary measuring
static int compiler_ticks; // total ticks in compilation
static int interpreter_ticks; // ticks in unknown interpreted method
static int deopt_ticks; // ticks in deoptimization
static int unknown_ticks; // ticks that cannot be categorized
static int received_ticks; // ticks that were received by task
static int delivered_ticks; // ticks that were delivered by task
// Sum of all ticks not charged to a specific Java method.
static int non_method_ticks() {
return
( received_gc_ticks
+ vm_operation_ticks
+ deopt_ticks
+ threads_lock_ticks
+ blocked_ticks
+ compiler_ticks
+ interpreter_ticks
+ unknown_ticks );
}
static elapsedTimer timer;
// Per-bytecode tick histograms (normal and stub variants).
static int* bytecode_ticks;
static int* bytecode_ticks_stub;
static void print_byte_code_statistics();
static int all_ticks; // total count of ticks received so far
static int all_int_ticks; // ticks in interpreter
static int all_comp_ticks; // ticks in compiled code (+ native)
static bool full_profile_flag; // collecting full profile?
static ThreadProfiler* thread_profiler;
static ThreadProfiler* vm_thread_profiler;
static void allocate_table();
friend class FlatProfilerTask;
static FlatProfilerTask* task;
static void record_vm_operation();
static void record_vm_tick();
static void record_thread_ticks();
private:
// Interval-report machinery (periodic top-N table of thread activity).
static int interval_ticks_previous; // delivered_ticks from the last interval
static void interval_record_thread(ThreadProfiler* tp); // extract ticks from ThreadProfiler.
static void interval_print(); // print interval data.
static void interval_reset(); // reset interval data.
enum {interval_print_size = 10};
static IntervalData* interval_data;
#endif // INCLUDE_FPROF
};
#endif // SHARE_VM_RUNTIME_FPROFILER_HPP
// File boundary: C:\hotspot-69087d08d473\src\share\vm/runtime/frame.cpp
#include "precompiled.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/decoder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Build a register map for walking the given thread's stack. update_map
// selects whether callee-saved register locations are tracked.
RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
  _thread = thread;
  _update_map = update_map;
  clear();
  debug_only(_update_for_id = NULL;)
#ifndef PRODUCT
  // Null out the location table in debug builds so stale entries are obvious.
  for (int slot = 0; slot < reg_count; slot++) {
    _location[slot] = NULL;
  }
#endif /* PRODUCT */
}
// Copy-construct from another register map, duplicating only the location
// entries the source marked valid.
RegisterMap::RegisterMap(const RegisterMap* map) {
  assert(map != this, "bad initialization parameter");
  assert(map != NULL, "RegisterMap must be present");
  _thread = map->thread();
  _update_map = map->update_map();
  _include_argument_oops = map->include_argument_oops();
  debug_only(_update_for_id = map->_update_for_id;)
  pd_initialize_from(map);
  if (update_map()) {
    for (int i = 0; i < location_valid_size; i++) {
      // update_map() is known true here, so read the source bits directly
      // (the original re-tested !update_map(), which was always false).
      LocationValidType bits = map->_location_valid[i];
      _location_valid[i] = bits;
      // Copy the location of every register whose valid bit is set.
      int j = i * location_valid_type_size;
      while (bits != 0) {
        if ((bits & 1) != 0) {
          assert(0 <= j && j < reg_count, "range check");
          _location[j] = map->_location[j];
        }
        bits >>= 1;
        j += 1;
      }
    }
  }
}
// Reset the map to its initial state; argument oops are included by default.
void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (!_update_map) {
    // Not tracking locations: platform-specific initialization only.
    pd_initialize();
    return;
  }
  // Invalidate every cached location, then do platform-specific clearing.
  for (int i = location_valid_size; i-- > 0; ) {
    _location_valid[i] = 0;
  }
  pd_clear();
}
#ifndef PRODUCT
// Dump every register with a recorded save location, and the value stored
// there when the location is readable.
void RegisterMap::print_on(outputStream* st) const {
st->print_cr("Register map");
for(int i = 0; i < reg_count; i++) {
VMReg r = VMRegImpl::as_VMReg(i);
intptr_t* src = (intptr_t*) location(r);
if (src != NULL) {
r->print_on(st);
st->print(" [" INTPTR_FORMAT "] = ", src);
// Do not load through a misaligned pointer; flag it instead.
if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
st->print_cr("<misaligned>");
} else {
st->print_cr(INTPTR_FORMAT, *src);
}
}
}
}
// Convenience wrapper: dump the register map to the tty stream.
void RegisterMap::print() const {
print_on(tty);
}
#endif
// Return the "raw" pc of this frame, i.e. the value that is physically
// stored in the return-address slot. For a deoptimized frame that is the
// deopt handler entry (regular or method-handle variant), biased by
// pc_return_offset just like a normal pc.
address frame::raw_pc() const {
  if (is_deoptimized_frame()) {
    nmethod* nm = cb()->as_nmethod_or_null();
    // as_nmethod_or_null() can return NULL for other blob kinds; a
    // deoptimized frame must be in an nmethod, so check before using it.
    assert(nm != NULL, "deoptimized frame must have an nmethod");
    if (nm->is_method_handle_return(pc()))
      return nm->deopt_mh_handler_begin() - pc_return_offset;
    else
      return nm->deopt_handler_begin() - pc_return_offset;
  } else {
    return (pc() - pc_return_offset);
  }
}
// Point the frame at a new pc and re-resolve the owning code blob.
// The deopt state becomes unknown until it is recomputed.
void frame::set_pc(address newpc ) {
#ifdef ASSERT
// The old pc must not already be a deopt pc of the current nmethod.
if (_cb != NULL && _cb->is_nmethod()) {
assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
}
#endif // ASSERT
_deopt_state = unknown;
_pc = newpc;
_cb = CodeCache::find_blob_unsafe(_pc);
}
// Frames that stack walkers should skip entirely; currently none.
bool frame::is_ignored_frame() const {
return false; // FIXME: some LambdaForm frames should be ignored
}
// True if this frame has been patched to run through the deopt handler.
// Only answerable once _deopt_state has been computed.
bool frame::is_deoptimized_frame() const {
assert(_deopt_state != unknown, "not answerable");
return _deopt_state == is_deoptimized;
}
// True if this frame belongs to a native-method nmethod (a native wrapper).
bool frame::is_native_frame() const {
  if (_cb == NULL) {
    return false;
  }
  if (!_cb->is_nmethod()) {
    return false;
  }
  return ((nmethod*)_cb)->is_native_method();
}
// True if this frame executes Java code, either interpreted or compiled.
bool frame::is_java_frame() const {
  return is_interpreted_frame() || is_compiled_frame();
}
// True if this frame is in an nmethod holding a compiled Java method.
bool frame::is_compiled_frame() const {
  return _cb != NULL
      && _cb->is_nmethod()
      && ((nmethod*)_cb)->is_java_method();
}
// True if this frame is in a runtime stub blob.
bool frame::is_runtime_frame() const {
  if (_cb == NULL) {
    return false;
  }
  return _cb->is_runtime_stub();
}
// True if this frame is in the safepoint stub blob.
bool frame::is_safepoint_blob_frame() const {
  if (_cb == NULL) {
    return false;
  }
  return _cb->is_safepoint_stub();
}
// True if no Java frame lies between this frame and the first frame:
// walking senders reaches the first frame before any Java frame.
// Uses the current thread, so only valid on the frame's own thread.
bool frame::is_first_java_frame() const {
RegisterMap map(JavaThread::current(), false); // No update
frame s;
for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
return s.is_first_frame();
}
bool frame::entry_frame_is_first() const {
return entry_frame_call_wrapper()->is_first_frame();
}
// Return the entry frame's JavaCallWrapper, but only after verifying that
// the wrapper slot lies within the thread's usable stack; NULL otherwise.
JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const {
  JavaCallWrapper** slot = entry_frame_call_wrapper_addr();
  if (!thread->is_in_usable_stack((address) slot)) {
    return NULL;
  }
  return *slot;
}
// Sanity-check an entry frame: its JavaCallWrapper must lie between this
// frame's fp and the thread's stack base, and the anchor's last Java sp
// must be above this frame's sp.
bool frame::is_entry_frame_valid(JavaThread* thread) const {
address jcw = (address)entry_frame_call_wrapper();
bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)fp()); // less than stack base
if (!jcw_safe) {
return false;
}
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
return (jfa->last_Java_sp() > sp());
}
// True if this compiled frame should be deoptimized: its nmethod is marked
// for deoptimization, the frame is not already deoptimized, and it is not
// sitting at a poll-return pc.
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false;
assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
nmethod* nm = (nmethod *)_cb;
// Optional tracing of the deopt-marking check.
if (TraceDependencies) {
tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
nm->print_value_on(tty);
tty->cr();
}
if( !nm->is_marked_for_deoptimization() )
return false;
// Frames stopped at a poll return are excluded.
return !nm->is_at_poll_return(pc());
}
// True if this frame may be deoptimized: it must be compiled, its nmethod
// must allow deoptimization, and it must not sit at a poll-return pc.
bool frame::can_be_deoptimized() const {
  if (!is_compiled_frame()) {
    return false;
  }
  nmethod* nm = (nmethod*) _cb;
  return nm->can_be_deoptimized() && !nm->is_at_poll_return(pc());
}
// Patch this compiled frame so that it deoptimizes when control returns to
// it: record the original pc in the nmethod and redirect the frame's pc to
// the appropriate deopt handler.
void frame::deoptimize(JavaThread* thread) {
assert(_cb != NULL && _cb->is_nmethod(), "must be");
nmethod* nm = (nmethod*)_cb;
if (NeedsDeoptSuspend && Thread::current() != thread) {
// Patching another thread's frame is only safe at a safepoint.
assert(SafepointSynchronize::is_at_safepoint(),
"patching other threads for deopt may only occur at a safepoint");
// Already flagged for a suspension-based deopt of this frame: done.
if (id() == thread->must_deopt_id()) {
assert(thread->is_deopt_suspend(), "lost suspension");
return;
}
// For threads in native (or transitioning), arrange a deopt suspend
// when the frame just below the native transition is the one at risk.
JavaThreadState state = thread->safepoint_state()->orig_thread_state();
if (state == _thread_in_native || state == _thread_in_native_trans) {
RegisterMap map(thread, false);
frame at_risk = thread->last_frame().sender(&map);
if (id() == at_risk.id()) {
thread->set_must_deopt_id(id());
thread->set_deopt_suspend();
return;
}
}
} // NeedsDeoptSuspend
// Method-handle returns use a dedicated deopt handler entry.
address deopt = nm->is_method_handle_return(pc()) ?
nm->deopt_mh_handler_begin() :
nm->deopt_handler_begin();
nm->set_original_pc(this, pc());
patch_pc(thread, deopt);
#ifdef ASSERT
{
// Verify that the frame we just patched now reads as deoptimized.
RegisterMap map(thread, false);
frame check = thread->last_frame();
while (id() != check.id()) {
check = check.sender(&map);
}
assert(check.is_deoptimized_frame(), "missed deopt");
}
#endif // ASSERT
}
// Walk senders until the next Java frame; guarantees one exists (i.e. this
// must not be the last Java frame on the stack).
frame frame::java_sender() const {
RegisterMap map(JavaThread::current(), false);
frame s;
for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ;
guarantee(s.is_java_frame(), "tried to get caller of first java frame");
return s;
}
// Return the sender, skipping over runtime stubs and ignorable frames.
frame frame::real_sender(RegisterMap* map) const {
  frame caller = sender(map);
  while (caller.is_runtime_frame() || caller.is_ignored_frame()) {
    caller = caller.sender(map);
  }
  return caller;
}
// Best-effort search for the closest Java frame at or below this one,
// only traversing senders that are provably safe to walk; returns a
// default-constructed frame() if none can be found safely.
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
RegisterMap map(thread, false);
frame first_java_frame = frame();
if (is_java_frame()) {
first_java_frame = *this;
} else if (safe_for_sender(thread)) {
// Walk senders while each step is safe and we have not hit the first
// frame; stop at the first Java frame found.
for (frame sender_frame = sender(&map);
sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
sender_frame = sender_frame.sender(&map)) {
if (sender_frame.is_java_frame()) {
first_java_frame = sender_frame;
break;
}
}
}
return first_java_frame;
}
// NOTE(review): only the assertion remains here; the actual store of the
// locals pointer is presumably done by platform-specific code -- confirm.
void frame::interpreter_frame_set_locals(intptr_t* locs) {
assert(is_interpreted_frame(), "Not an interpreted frame");
}
// Load the Method* stored in this interpreted frame's method slot.
Method* frame::interpreter_frame_method() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method** slot = interpreter_frame_method_addr();
  Method* method = *slot;
  assert(method->is_method(), "not a Method*");
  return method;
}
// NOTE(review): only the assertion remains here; the actual store of the
// Method* is presumably done by platform-specific code -- confirm.
void frame::interpreter_frame_set_method(Method* method) {
assert(is_interpreted_frame(), "interpreted frame expected");
}
// Update the frame's bcx slot, which holds either a bytecode index (bci)
// or a bytecode pointer (bcp). When the interpreter is profiling, the mdx
// slot mirrors bcx's representation (index vs. pointer) and must be
// converted whenever bcx switches form.
void frame::interpreter_frame_set_bcx(intptr_t bcx) {
assert(is_interpreted_frame(), "Not an interpreted frame");
if (ProfileInterpreter) {
bool formerly_bci = is_bci(interpreter_frame_bcx());
bool is_now_bci = is_bci(bcx);
intptr_t mdx = interpreter_frame_mdx();
// mdx == 0 means "no method data position"; nothing to convert.
if (mdx != 0) {
if (formerly_bci) {
// Switching from index form back to pointer form: convert the
// stored mdi (biased by one) back to an mdp.
if (!is_now_bci) {
MethodData* mdo = interpreter_frame_method()->method_data();
assert(mdo != NULL, "");
int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
address mdp = mdo->di_to_dp(mdi);
interpreter_frame_set_mdx((intptr_t)mdp);
}
} else {
// Switching from pointer form to index form: store mdi + 1 so a
// valid zero index is distinguishable from "no mdx".
if (is_now_bci) {
MethodData* mdo = interpreter_frame_method()->method_data();
assert(mdo != NULL, "");
int mdi = mdo->dp_to_di((address)mdx);
interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
}
}
}
// NOTE(review): no store of bcx itself is visible in this function;
// presumably it happens in platform-specific code -- confirm.
} else {
}
}
// Current bytecode index: the bcx slot holds either a bci directly or a
// bcp that must be converted via the frame's Method*.
jint frame::interpreter_frame_bci() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t raw = interpreter_frame_bcx();
  if (is_bci(raw)) {
    return raw;
  }
  return interpreter_frame_method()->bci_from((address)raw);
}
// Store a bytecode index by converting it to a bytecode pointer through
// the frame's Method*. Forbidden while bcx holds a bci (per the assert,
// that is the during-GC representation).
void frame::interpreter_frame_set_bci(jint bci) {
assert(is_interpreted_frame(), "interpreted frame expected");
assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
}
// Current bytecode pointer: if the bcx slot holds an index, convert it to
// a pointer via the frame's Method*; otherwise it already is the pointer.
address frame::interpreter_frame_bcp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t raw = interpreter_frame_bcx();
  if (is_bci(raw)) {
    return interpreter_frame_method()->bcp_from(raw);
  }
  return (address)raw;
}
// Store a raw bytecode pointer into the bcx slot. Forbidden while bcx
// holds a bci (per the assert, that is the during-GC representation).
void frame::interpreter_frame_set_bcp(address bcp) {
assert(is_interpreted_frame(), "interpreted frame expected");
assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
interpreter_frame_set_bcx((intptr_t)bcp);
}
// Store the method-data index/pointer slot; see interpreter_frame_set_bcx
// for how its representation tracks bcx. NOTE(review): only assertions are
// visible here -- the store is presumably platform-specific; confirm.
void frame::interpreter_frame_set_mdx(intptr_t mdx) {
assert(is_interpreted_frame(), "Not an interpreted frame");
assert(ProfileInterpreter, "must be profiling interpreter");
}
// Return the stored method-data pointer. Only valid while bcx holds a
// pointer (per the assert, not during GC when it holds a bci).
address frame::interpreter_frame_mdp() const {
assert(ProfileInterpreter, "must be profiling interpreter");
assert(is_interpreted_frame(), "interpreted frame expected");
intptr_t bcx = interpreter_frame_bcx();
intptr_t mdx = interpreter_frame_mdx();
assert(!is_bci(bcx), "should not access mdp during GC");
return (address)mdx;
}
// Store a method-data pointer into the mdx slot.
void frame::interpreter_frame_set_mdp(address mdp) {
assert(is_interpreted_frame(), "interpreted frame expected");
// Clearing (mdp == NULL) is performed immediately, regardless of state.
// NOTE(review): the NULL case still falls through to the assert below and
// a second identical store -- an early return may be missing; confirm
// against upstream before changing.
if (mdp == NULL) {
interpreter_frame_set_mdx((intptr_t)mdp);
}
intptr_t bcx = interpreter_frame_bcx();
assert(!is_bci(bcx), "should not set mdp during GC");
interpreter_frame_set_mdx((intptr_t)mdp);
}
// Monitors are laid out contiguously in the interpreter frame; step to the
// next (higher-address) BasicObjectLock.
BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
interpreter_frame_verify_monitor(current);
#endif
BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
return next;
}
// Step back to the previous (lower-address) BasicObjectLock in the frame's
// contiguous monitor area.
BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
#endif
BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
return previous;
}
// Address of local variable 'index', computed from the locals base pointer
// and the interpreter's per-local byte offset.
intptr_t* frame::interpreter_frame_local_at(int index) const {
  const int word_offset = Interpreter::local_offset_in_bytes(index) / wordSize;
  intptr_t* locals_base = *interpreter_frame_locals_addr();
  return &locals_base[word_offset];
}
// Address of expression-stack slot 'offset'. The logical offset is scaled
// by the platform's stack growth direction and element size in words.
intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
  const int directed = offset * interpreter_frame_expression_stack_direction();
  const int word_index = directed * Interpreter::stackElementWords;
  return &(interpreter_frame_expression_stack()[word_index]);
}
// Number of elements currently on the interpreter expression stack,
// derived from the distance between the stack base and the tos address,
// respecting the platform's stack growth direction.
jint frame::interpreter_frame_expression_stack_size() const {
int element_size = Interpreter::stackElementWords;
size_t stack_size = 0;
if (frame::interpreter_frame_expression_stack_direction() < 0) {
stack_size = (interpreter_frame_expression_stack() -
interpreter_frame_tos_address() + 1)/element_size;
} else {
stack_size = (interpreter_frame_tos_address() -
interpreter_frame_expression_stack() + 1)/element_size;
}
assert( stack_size <= (size_t)max_jint, "stack size too big");
return ((jint)stack_size);
}
// Short display name for this frame's kind, used by the printing code.
// The order of checks is preserved from the original classification.
const char* frame::print_name() const {
  if (is_native_frame()) {
    return "Native";
  }
  if (is_interpreted_frame()) {
    return "Interpreted";
  }
  if (is_compiled_frame()) {
    return is_deoptimized_frame() ? "Deoptimized" : "Compiled";
  }
  return (sp() == NULL) ? "Empty" : "C";
}
// Print a one-line description of this frame (kind, sp/fp/pc) plus, for
// stub and interpreter pcs, the name of the code being executed. In
// non-product builds with WizardMode && Verbose the code is disassembled.
void frame::print_value_on(outputStream* st, JavaThread *thread) const {
NOT_PRODUCT(address begin = pc()-40;)
NOT_PRODUCT(address end = NULL;)
st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
if (sp() != NULL)
st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());
if (StubRoutines::contains(pc())) {
// Stub frame: name the stub routine.
st->print_cr(")");
st->print("(");
StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
st->print("~Stub::%s", desc->name());
NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
} else if (Interpreter::contains(pc())) {
// Interpreter frame: name the codelet if one can be located.
st->print_cr(")");
st->print("(");
InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
if (desc != NULL) {
st->print("~");
desc->print_on(st);
NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
} else {
st->print("~interpreter");
}
}
st->print_cr(")");
if (_cb != NULL) {
st->print(" ");
_cb->print_value_on(st);
st->cr();
#ifndef PRODUCT
// Fall back to the whole blob's code range if no narrower range was set.
if (end == NULL) {
begin = _cb->code_begin();
end = _cb->code_end();
}
#endif
}
NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
}
// Print the common one-line summary; interpreted frames additionally get
// a detailed dump of locals, stack, and monitors.
void frame::print_on(outputStream* st) const {
  print_value_on(st, NULL);
  if (!is_interpreted_frame()) {
    return;
  }
  interpreter_frame_print_on(st);
}
// Detailed dump of an interpreted frame: locals, expression stack (highest
// index first), monitors, bcp/bci, locals base and method. Compiled out in
// PRODUCT builds.
void frame::interpreter_frame_print_on(outputStream* st) const {
#ifndef PRODUCT
assert(is_interpreted_frame(), "Not an interpreted frame");
jint i;
// Local variables.
for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
intptr_t x = *interpreter_frame_local_at(i);
st->print(" - local [" INTPTR_FORMAT "]", x);
st->fill_to(23);
st->print_cr("; #%d", i);
}
// Expression stack slots.
for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
intptr_t x = *interpreter_frame_expression_stack_at(i);
st->print(" - stack [" INTPTR_FORMAT "]", x);
st->fill_to(23);
st->print_cr("; #%d", i);
}
// Monitors: each entry prints its object and its lock.
for (BasicObjectLock* current = interpreter_frame_monitor_end();
current < interpreter_frame_monitor_begin();
current = next_monitor_in_interpreter_frame(current)) {
st->print(" - obj [");
current->obj()->print_value_on(st);
st->print_cr("]");
st->print(" - lock [");
current->lock()->print_on(st);
st->print_cr("]");
}
st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp());
st->fill_to(23);
st->print_cr("; @%d", interpreter_frame_bci());
st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
st->fill_to(23);
st->print("; ");
interpreter_frame_method()->print_name(st);
st->cr();
#endif
}
// Print a native (C) frame: "V" for pcs inside the VM, "C" otherwise,
// followed by [library+offset] and, when resolvable, symbol+offset.
// buf/buflen provide scratch space for the resolved names.
void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
bool in_vm = os::address_is_in_vm(pc);
st->print(in_vm ? "V" : "C");
int offset;
bool found;
// Library name first.
found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
if (found) {
// Strip the directory portion of the library path.
const char *p1, *p2;
p1 = buf;
int len = (int)strlen(os::file_separator());
while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
st->print(" [%s+0x%x]", p1, offset);
} else {
st->print(" " PTR_FORMAT, pc);
}
// Then the function symbol, when it can be decoded.
if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
if (found) {
st->print(" %s+0x%x", buf, offset);
}
}
}
// Print a single line describing this frame for error reporting. Prefixes
// (visible in the format strings below): j = interpreted Java, J =
// compiled Java, v = VM-generated code, V/C = native frames (via
// print_C_frame). buf/buflen provide scratch space for names.
// NOTE(review): the 'verbose' parameter is currently unused here.
void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
if (_cb != NULL) {
if (Interpreter::contains(pc())) {
// Interpreted Java frame: method name+signature and bci.
Method* m = this->interpreter_frame_method();
if (m != NULL) {
m->name_and_sig_as_C_string(buf, buflen);
st->print("j %s", buf);
st->print("+%d", this->interpreter_frame_bci());
} else {
st->print("j " PTR_FORMAT, pc());
}
} else if (StubRoutines::contains(pc())) {
StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
if (desc != NULL) {
st->print("v ~StubRoutines::%s", desc->name());
} else {
st->print("v ~StubRoutines::" PTR_FORMAT, pc());
}
} else if (_cb->is_buffer_blob()) {
st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
} else if (_cb->is_nmethod()) {
// Compiled Java frame: compile id, compiler name, method, code size
// and pc offset within the blob.
nmethod* nm = (nmethod*)_cb;
Method* m = nm->method();
if (m != NULL) {
m->name_and_sig_as_C_string(buf, buflen);
st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
} else {
st->print("J " PTR_FORMAT, pc());
}
} else if (_cb->is_runtime_stub()) {
st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
} else if (_cb->is_deoptimization_stub()) {
st->print("v ~DeoptimizationBlob");
} else if (_cb->is_exception_stub()) {
st->print("v ~ExceptionBlob");
} else if (_cb->is_safepoint_stub()) {
st->print("v ~SafepointBlob");
} else {
st->print("v blob " PTR_FORMAT, pc());
}
} else {
// No code blob: a native (C) frame.
print_C_frame(st, buf, buflen, pc());
}
}
// The interpreter_frame_expression_stack_at method in the case of SPARC needs the
// max_stack value of the method in order to compute the expression stack address.
// It uses the Method* in order to get the max_stack value but during GC this
// Method* value saved on the frame is changed by reverse_and_push and hence cannot
// be used. So we save the max_stack value in the FrameClosure object and pass it
// down to the interpreter_frame_expression_stack_at method.
// Maps interpreter-frame oop-map offsets to the actual oop addresses
// (locals or expression-stack slots) and forwards live ones to the wrapped
// OopClosure. max_locals/max_stack are carried explicitly rather than
// re-read from the Method* (see the note above regarding GC on SPARC).
class InterpreterFrameClosure : public OffsetClosure {
private:
frame* _fr;
OopClosure* _f;
int _max_locals;
int _max_stack;
public:
InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
OopClosure* f) {
_fr = fr;
_max_locals = max_locals;
_max_stack = max_stack;
_f = f;
}
// Called with a slot offset; offsets below _max_locals denote locals,
// the rest expression-stack slots (rebased by _max_locals).
void offset_do(int offset) {
oop* addr;
if (offset < _max_locals) {
addr = (oop*) _fr->interpreter_frame_local_at(offset);
assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
_f->do_oop(addr);
} else {
addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
// Only visit slots on the live side of tos; direction of comparison
// depends on the platform's stack growth direction.
bool in_stack;
if (frame::interpreter_frame_expression_stack_direction() > 0) {
in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
} else {
in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
}
if (in_stack) {
_f->do_oop(addr);
}
}
}
int max_locals() { return _max_locals; }
frame* fr() { return _fr; }
};
// Signature-driven visitor over the outgoing arguments of an interpreted
// call; the arguments live on the caller's expression stack relative to tos.
class InterpretedArgumentOopFinder: public SignatureInfo {
private:
OopClosure* _f; // Closure to invoke
int _offset; // TOS-relative offset, decremented with each argument
bool _has_receiver; // true if the callee has a receiver
frame* _fr;
// SignatureInfo callback: step down past this argument, visiting it if
// it is an oop.
void set(int size, BasicType type) {
_offset -= size;
if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
}
void oop_offset_do() {
oop* addr;
addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
_f->do_oop(addr);
}
public:
InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
// Compute the argument slot count; the arguments must still be on the
// expression stack for the offsets to be meaningful.
int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
assert(!fr->is_interpreted_frame() ||
args_size <= fr->interpreter_frame_expression_stack_size(),
"args cannot be on stack anymore");
_f = f;
_fr = fr;
_offset = args_size;
}
// Visit the receiver (if any), then the declared parameters.
void oops_do() {
if (_has_receiver) {
--_offset;
oop_offset_do();
}
iterate_parameters();
}
};
// Signature-driven visitor over the arguments stored in an entry frame
// (a call from the VM into Java), addressed via entry_frame_argument_at.
class EntryFrameOopFinder: public SignatureInfo {
private:
bool _is_static;
int _offset; // current argument index; the last parameter is at index 0
frame* _fr;
OopClosure* _f;
// SignatureInfo callback: visit this argument if it is an oop, then move
// down past it.
void set(int size, BasicType type) {
assert (_offset >= 0, "illegal offset");
if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
_offset -= size;
}
void oop_at_offset_do(int offset) {
assert (offset >= 0, "illegal offset");
oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
_f->do_oop(addr);
}
public:
EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
_f = NULL; // will be set later
_fr = frame;
_is_static = is_static;
_offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
}
// Visit the receiver (for non-static calls), then the parameters.
void arguments_do(OopClosure* f) {
_f = f;
if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
iterate_parameters();
}
};
// Address of the callee's receiver on this frame's expression stack,
// located at the signature's argument-size offset from tos.
oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
  const int arg_slots = ArgumentSizeComputer(signature).size();
  return (oop*) interpreter_frame_tos_at(arg_slots);
}
// Visit all oops in an interpreted frame: monitors, the optional temp oop,
// outgoing arguments at a call site, and the live locals/expression-stack
// slots selected by the method's oop map for the current bci. cld_f, when
// non-NULL, additionally visits the holder's class loader data.
void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
const RegisterMap* map, bool query_oop_map_cache) {
assert(is_interpreted_frame(), "Not an interpreted frame");
assert(map != NULL, "map must be set");
Thread *thread = Thread::current();
methodHandle m (thread, interpreter_frame_method());
jint bci = interpreter_frame_bci();
assert(!Universe::heap()->is_in(m()),
"must be valid oop");
assert(m->is_method(), "checking frame value");
assert((m->is_native() && bci == 0) ||
(!m->is_native() && bci >= 0 && bci < m->code_size()),
"invalid bci value");
// Monitors first.
for (
BasicObjectLock* current = interpreter_frame_monitor_end();
current < interpreter_frame_monitor_begin();
current = next_monitor_in_interpreter_frame(current)
) {
#ifdef ASSERT
interpreter_frame_verify_monitor(current);
#endif
current->oops_do(f);
}
// Keep the method's class loader data alive.
if (cld_f != NULL) {
cld_f->do_cld(m->method_holder()->class_loader_data());
}
if (m->is_native() PPC32_ONLY(&& m->is_static())) {
f->do_oop(interpreter_frame_temp_oop_addr());
}
int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
Symbol* signature = NULL;
bool has_receiver = false;
// If the frame is stopped at an invoke bytecode, process the outgoing
// arguments on the expression stack separately.
if (!m->is_native()) {
Bytecode_invoke call = Bytecode_invoke_check(m, bci);
if (call.is_valid()) {
signature = call.signature();
has_receiver = call.has_receiver();
if (map->include_argument_oops() &&
interpreter_frame_expression_stack_size() > 0) {
ResourceMark rm(thread); // is this right ???
oops_interpreted_arguments_do(signature, has_receiver, f);
}
}
}
// Finally visit locals and expression stack through the oop map for this
// bci (cached, or computed fresh when query_oop_map_cache is false).
InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
InterpreterOopMap mask;
if (query_oop_map_cache) {
m->mask_for(bci, &mask);
} else {
OopMapCache::compute_one_oop_map(m, bci, &mask);
}
mask.iterate_oop(&blk);
}
// Visit the outgoing arguments of an interpreted call via a
// signature-driven finder.
void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
  InterpretedArgumentOopFinder arg_finder(signature, has_receiver, this, f);
  arg_finder.oops_do();
}
// Visit the oops recorded in this frame's code blob oop maps (plus callee
// argument oops when requested), then hand the blob to 'cf' if given.
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::oops_do(this, reg_map, f);
    if (reg_map->include_argument_oops()) {
      _cb->preserve_callee_argument_oops(*this, reg_map, f);
    }
  }
  if (cf != NULL) {
    cf->do_code_blob(_cb);
  }
}
// Signature-driven visitor over the oop arguments of a compiled call,
// locating each argument through the calling convention (the VMRegPair
// list from SharedRuntime::find_callee_arguments) and the register map.
class CompiledArgumentOopFinder: public SignatureInfo {
protected:
OopClosure* _f;
int _offset; // the current offset, incremented with each argument
bool _has_receiver; // true if the callee has a receiver
bool _has_appendix; // true if the call has an appendix
frame _fr;
RegisterMap* _reg_map;
int _arg_size;
VMRegPair* _regs; // VMReg list of arguments
// SignatureInfo callback: visit this argument if it is an oop, then
// advance past it.
void set(int size, BasicType type) {
if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
_offset += size;
}
// Resolve the current argument's VMReg to a frame location and visit it.
virtual void handle_oop_offset() {
VMReg reg = _regs[_offset].first();
oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
_f->do_oop(loc);
}
public:
CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
: SignatureInfo(signature) {
_f = f;
_offset = 0;
_has_receiver = has_receiver;
_has_appendix = has_appendix;
_fr = fr;
_reg_map = (RegisterMap*)reg_map;
_arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);
int arg_size;
_regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
assert(arg_size == _arg_size, "wrong arg size");
}
// Visit the receiver first (if any), then the declared parameters, then
// the appendix (if any).
void oops_do() {
if (_has_receiver) {
handle_oop_offset();
_offset++;
}
iterate_parameters();
if (_has_appendix) {
handle_oop_offset();
_offset++;
}
}
};
// Visit the outgoing oop arguments at a compiled call site.
// The finder allocates from the resource area, hence the ResourceMark.
void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
ResourceMark rm;
CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
finder.oops_do();
}
// Fetch the receiver oop at this call site from the platform-defined
// receiver register/stack location recorded in the register map.
oop frame::retrieve_receiver(RegisterMap* reg_map) {
frame caller = *this;
VMReg reg = SharedRuntime::name_for_receiver();
oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
if (oop_adr == NULL) {
// This guarantee always fires inside this branch; it exists to produce a
// descriptive VM error rather than silently returning NULL.
guarantee(oop_adr != NULL, "bad register save location");
return NULL;
}
oop r = *oop_adr;
assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
return r;
}
// Translate an oop-map VMReg into an address within this frame: machine
// registers come from the register map's save area, stack slots are offsets
// from the unextended stack pointer.
oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
  if (!reg->is_reg()) {
    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
  }
  return (oop *)reg_map->location(reg);
}
// Return the BasicLock slot of a synchronized native nmethod's frame.
// The lock lives at a fixed sp-relative offset recorded in the nmethod.
BasicLock* frame::get_native_monitor() {
nmethod* nm = (nmethod*)_cb;
assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
"Should not call this unless it's a native nmethod");
int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
assert(byte_offset >= 0, "should not see invalid offset");
return (BasicLock*) &sp()[byte_offset / wordSize];
}
// Return the receiver oop saved in a native nmethod's frame, at the
// sp-relative offset recorded in the nmethod.
oop frame::get_native_receiver() {
nmethod* nm = (nmethod*)_cb;
assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
"Should not call this unless it's a native nmethod");
int byte_offset = in_bytes(nm->native_receiver_sp_offset());
assert(byte_offset >= 0, "should not see invalid offset");
oop owner = ((oop*) sp())[byte_offset / wordSize];
assert( Universe::heap()->is_in(owner), "bad receiver" );
return owner;
}
// Visit the oops of an entry (call-stub) frame: optionally the outgoing Java
// arguments (traversed via the callee's signature), and always the oops held
// by the frame's JavaCallWrapper.
void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
assert(map != NULL, "map must be set");
if (map->include_argument_oops()) {
Thread *thread = Thread::current();
methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
EntryFrameOopFinder finder(this, m->signature(), m->is_static());
finder.arguments_do(f);
}
entry_frame_call_wrapper()->oops_do(f);
}
// Dispatch oop traversal on the kind of frame; each kind knows where its
// oops live.  cld_f may be NULL; cf is only used for code-blob frames.
void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
  // Simulate a GC crash here to exercise the java-thread dump in error
  // reports.  The flag was a no-op without the store below: declaring the
  // NULL pointer alone never faults.
  if (CrashGCForDumpingJavaThread) {
    char *t = NULL;
    *t = 'c';   // deliberate null write: crash on purpose when requested
  }
#endif
  if (is_interpreted_frame()) {
    oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
  } else if (is_entry_frame()) {
    oops_entry_do(f, map);
  } else if (CodeCache::contains(pc())) {
    oops_code_blob_do(f, cf, map);
#ifdef SHARK
  } else if (is_fake_stub_frame()) {
    // nothing to do
#endif // SHARK
  } else {
    ShouldNotReachHere();
  }
}
// Report this frame's code blob to the closure, but only when the blob is an
// nmethod; other blob kinds are ignored.
void frame::nmethods_do(CodeBlobClosure* cf) {
  if (_cb == NULL) return;
  if (!_cb->is_nmethod()) return;
  cf->do_code_blob(_cb);
}
// Report metadata held directly by this frame.  Only interpreted frames hold
// a raw Method* that must be reported here.
// NOTE(review): the _cb != NULL test alongside Interpreter::contains(pc())
// is unusual -- confirm both conditions are intended.
void frame::metadata_do(void f(Metadata*)) {
if (_cb != NULL && Interpreter::contains(pc())) {
Method* m = this->interpreter_frame_method();
assert(m != NULL, "huh?");
f(m);
}
}
// Before GC: for interpreted frames, store the bcx slot in its bci form
// (gc_epilogue converts it back to a raw bcp afterwards).
void frame::gc_prologue() {
  if (!is_interpreted_frame()) {
    return;
  }
  interpreter_frame_set_bcx(interpreter_frame_bci());
}
// After GC: undo gc_prologue by converting the saved bci back into a raw
// bytecode pointer, then run the platform-specific epilogue.
void frame::gc_epilogue() {
if (is_interpreted_frame()) {
interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
}
// call processor specific epilogue function
pd_gc_epilog();
}
# ifdef ENABLE_ZAP_DEAD_LOCALS
// Warn about stack slots expected to hold plain values whose contents look
// like a heap address (possible stale oop).
void frame::CheckValueClosure::do_oop(oop* p) {
if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
}
}
frame::CheckValueClosure frame::_check_value;
// Warn about slots that should hold an oop but contain a non-oop value.
void frame::CheckOopClosure::do_oop(oop* p) {
if (*p != NULL && !(*p)->is_oop()) {
warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
}
}
frame::CheckOopClosure frame::_check_oop;
// For derived pointers only the base oop is checked; the derived value is
// ignored here.
void frame::check_derived_oop(oop* base, oop* derived) {
_check_oop.do_oop(base);
}
// Called for slots the oop map reports as dead.
// NOTE(review): despite the name, this closure only traces -- it never writes
// a zap pattern into *p.  Upstream HotSpot stores a poison value here;
// confirm whether the omission is intentional.
void frame::ZapDeadClosure::do_oop(oop* p) {
if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
}
frame::ZapDeadClosure frame::_zap_dead;
// Debug-only (ENABLE_ZAP_DEAD_LOCALS): run the check/zap closures over this
// frame's locals, dispatching on the kind of frame.  Must run on the frame's
// own thread.
void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
assert(thread == Thread::current(), "need to synchronize to do this to another thread");
if (TraceZapDeadLocals) {
ResourceMark rm(thread);
tty->print_cr("--------------------------------------------------------------------------------");
tty->print("Zapping dead locals in ");
print_on(tty);
tty->cr();
}
if (is_entry_frame ()) zap_dead_entry_locals (thread, map);
else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
else if (is_compiled_frame()) zap_dead_compiled_locals (thread, map);
else
; // other kinds of frames are deliberately left alone
if (TraceZapDeadLocals) {
tty->cr();
}
}
// Run the oop/value/dead closures over an interpreted frame's locals and
// expression stack, using the method's oop map at the current bci to decide
// which closure sees each slot.
void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
assert(is_interpreted_frame(), "Not an interpreted frame");
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
// native methods have no locals array of their own; use parameter size
int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
&_check_value);
InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
&_check_oop );
InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
&_zap_dead );
InterpreterOopMap mask;
m->mask_for(bci, &mask);
mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
}
// Run the check closures over a compiled frame via its oop maps; frames
// whose blob has no oop maps are skipped.
void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {
ResourceMark rm(thread);
assert(_cb != NULL, "sanity check");
if (_cb->oop_maps() != NULL) {
OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
}
}
// Unimplemented for entry and deoptimized frames; only traced when requested.
void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}
void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}
# endif // ENABLE_ZAP_DEAD_LOCALS
// Sanity-check this frame: validate the Method* of interpreted frames, then
// run the verifying oop closure over every oop the frame reports.
void frame::verify(const RegisterMap* map) {
if (is_interpreted_frame()) {
Method* method = interpreter_frame_method();
guarantee(method->is_method(), "method is wrong in frame::verify");
if (!method->is_static()) {
// fetch the receiver slot
oop* p = (oop*) interpreter_frame_local_at(0);
// NOTE(review): 'p' is computed but never checked -- presumably only
// meant to touch the receiver local; confirm intent.
}
}
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
// query_oop_map_cache == false: compute fresh oop maps during verification
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}
#ifdef ASSERT
// A legal return address points into the call stub, the code cache, or the
// interpreter; anything else is rejected.
bool frame::verify_return_pc(address x) {
  return StubRoutines::returns_to_call_stub(x)
      || CodeCache::contains(x)
      || Interpreter::contains(x);
}
#endif
#ifdef ASSERT
// Verify that 'value' is a properly aligned BasicObjectLock* lying inside
// this frame's monitor area.  monitor_end is the low address bound and
// monitor_begin the high bound.
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
assert(is_interpreted_frame(), "Not an interpreted frame");
address low_mark = (address) interpreter_frame_monitor_end();
address high_mark = (address) interpreter_frame_monitor_begin();
address current = (address) value;
const int monitor_size = frame::interpreter_frame_monitor_size();
guarantee((high_mark - current) % monitor_size == 0 , "Misaligned top of BasicObjectLock*");
guarantee( high_mark > current , "Current BasicObjectLock* higher than high_mark");
guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
guarantee( current >= low_mark , "Current BasicObjectLock* below than low_mark");
}
#endif
#ifndef PRODUCT
// Describe this frame's interesting locations (sp, fp, locals, expression
// stack, monitors, ...) into 'values' for debug printing.  Non-product only.
void frame::describe(FrameValues& values, int frame_no) {
// boundaries: stack and the 'real' frame pointer
values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()
// print frame info at the highest boundary
intptr_t* info_address = MAX2(sp(), frame_pointer);
if (info_address != frame_pointer) {
// print frame_pointer explicitly if not marked as the highest boundary
values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
}
if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
}
if (is_interpreted_frame()) {
Method* m = interpreter_frame_method();
int bci = interpreter_frame_bci();
// method name and current bci
values.describe(-1, info_address,
FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
values.describe(-1, info_address,
err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
if (m->max_locals() > 0) {
intptr_t* l0 = interpreter_frame_local_at(0);
intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
// describe every local slot individually
for (int l = 0; l < m->max_locals(); l++) {
intptr_t* l0 = interpreter_frame_local_at(l);
values.describe(frame_no, l0, err_msg("local %d", l));
}
}
// compute the actual expression stack size from a fresh oop map
InterpreterOopMap mask;
OopMapCache::compute_one_oop_map(m, bci, &mask);
intptr_t* tos = NULL;
for (int e = 0; e < mask.expression_stack_size(); e++) {
tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
values.describe(frame_no, interpreter_frame_expression_stack_at(e),
err_msg("stack %d", e));
}
if (tos != NULL) {
values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
}
if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
}
} else if (is_entry_frame()) {
// For now just label the frame
values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
} else if (is_compiled_frame()) {
// label the nmethod, including its deopt state
nmethod* nm = cb()->as_nmethod_or_null();
values.describe(-1, info_address,
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
nm, nm->method()->name_and_sig_as_C_string(),
(_deopt_state == is_deoptimized) ?
" (deoptimized)" :
((_deopt_state == unknown) ? " (state unknown)" : "")),
2);
} else if (is_native_frame()) {
nmethod* nm = cb()->as_nmethod_or_null();
values.describe(-1, info_address,
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
nm, nm->method()->name_and_sig_as_C_string()), 2);
} else {
// fall back to the code blob's name, if any
char *info = (char *) "special frame";
if ((_cb != NULL) &&
(_cb->name() != NULL)) {
info = (char *)_cb->name();
}
values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
}
// platform-dependent additions
describe_pd(values, frame_no);
}
#endif
// Position the stream at the thread's last Java frame.  The thread must have
// a last Java frame; 'update' is forwarded to the embedded RegisterMap.
StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
assert(thread->has_last_Java_frame(), "sanity check");
_fr = thread->last_frame();
_is_done = false;
}
#ifndef PRODUCT
void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
FrameValue fv;
fv.location = location;
fv.owner = owner;
fv.priority = priority;
fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
strcpy(fv.description, description);
_values.append(fv);
}
#ifdef ASSERT
// Check that no stack location carries descriptions from two different
// owning frames (which would indicate overlapping frame layouts).
// Entries with owner == -1 are layout markers and are skipped.
void FrameValues::validate() {
_values.sort(compare);
bool error = false;
FrameValue prev;
prev.owner = -1;
// walk from highest address to lowest (sort order is ascending)
for (int i = _values.length() - 1; i >= 0; i--) {
FrameValue fv = _values.at(i);
if (fv.owner == -1) continue;
if (prev.owner == -1) {
prev = fv;
continue;
}
if (prev.location == fv.location) {
if (fv.owner != prev.owner) {
tty->print_cr("overlapping storage");
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
error = true;
}
} else {
prev = fv;
}
}
assert(!error, "invalid layout");
}
#endif // ASSERT
// Dump all recorded stack locations, clamped to the addresses that actually
// lie within the given thread's stack.  Undescribed words between entries
// are printed raw; duplicate descriptions for a location share one line.
void FrameValues::print(JavaThread* thread) {
_values.sort(compare);
// Sometimes values like the fp can be invalid values if the thread is
// stopped in an unusual place, so clamp the dump range to the stack.
int min_index = 0;
int max_index = _values.length() - 1;
intptr_t* v0 = _values.at(min_index).location;
intptr_t* v1 = _values.at(max_index).location;
if (thread == Thread::current()) {
while (!thread->is_in_stack((address)v0)) {
v0 = _values.at(++min_index).location;
}
while (!thread->is_in_stack((address)v1)) {
v1 = _values.at(--max_index).location;
}
} else {
while (!thread->on_local_stack((address)v0)) {
v0 = _values.at(++min_index).location;
}
while (!thread->on_local_stack((address)v1)) {
v1 = _values.at(--max_index).location;
}
}
// NOTE(review): 'min' is computed but not used below -- presumably kept for
// symmetry with 'max'; confirm.
intptr_t* min = MIN2(v0, v1);
intptr_t* max = MAX2(v0, v1);
intptr_t* cur = max;
intptr_t* last = NULL;
// walk from the highest address down, printing gaps and described slots
for (int i = max_index; i >= min_index; i--) {
FrameValue fv = _values.at(i);
while (cur > fv.location) {
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
cur--;
}
if (last == fv.location) {
// additional description for the same location
const char* spacer = " " LP64_ONLY(" ");
tty->print_cr(" %s %s %s", spacer, spacer, fv.description);
} else {
tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
last = fv.location;
cur--;
}
}
}
#endif // ndef PRODUCT
C:\hotspot-69087d08d473\src\share\vm/runtime/frame.hpp
#ifndef SHARE_VM_RUNTIME_FRAME_HPP
#define SHARE_VM_RUNTIME_FRAME_HPP
#include "oops/method.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/registerMap.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#if defined ADGLOBALS_MD_HPP
# include ADGLOBALS_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/adGlobals_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/adGlobals_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/adGlobals_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/adGlobals_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/adGlobals_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/adGlobals_ppc_64.hpp"
#endif
#endif // COMPILER2
#ifdef TARGET_ARCH_zero
# include "stack_zero.hpp"
#endif
typedef class BytecodeInterpreter* interpreterState;
class CodeBlob;
class FrameValues;
class vframeArray;
// A frame represents a physical stack frame on the Java (VM) stack: a
// value object holding the stack pointer, pc, and owning code blob, plus
// accessors for the interpreter-, compiled-, and entry-frame layouts.
class frame VALUE_OBJ_CLASS_SPEC {
private:
intptr_t* _sp; // stack pointer (from Thread::last_Java_sp)
address _pc; // program counter (the next instruction after the call)
CodeBlob* _cb; // CodeBlob that "owns" pc
enum deopt_state {
not_deoptimized,
is_deoptimized,
unknown
};
deopt_state _deopt_state;
public:
// Constructors
frame();
#ifndef PRODUCT
frame(void* sp, void* fp, void* pc);
#endif
// Basic accessors
address pc() const { return _pc; }
address raw_pc() const;
void set_pc( address newpc );
intptr_t* sp() const { return _sp; }
void set_sp( intptr_t* newsp ) { _sp = newsp; }
CodeBlob* cb() const { return _cb; }
void patch_pc(Thread* thread, address pc);
// Frame identity and relative age on the stack
intptr_t* id(void) const;
bool is_younger(intptr_t* id) const;
bool is_older(intptr_t* id) const;
bool equal(frame other) const;
// Frame classification predicates
bool is_interpreted_frame() const;
bool is_java_frame() const;
bool is_entry_frame() const; // Java frame called from C?
bool is_stub_frame() const;
bool is_ignored_frame() const;
bool is_native_frame() const;
bool is_runtime_frame() const;
bool is_compiled_frame() const;
bool is_safepoint_blob_frame() const;
bool is_deoptimized_frame() const;
bool is_first_frame() const; // oldest frame? (has no sender)
bool is_first_java_frame() const; // same for Java frame
bool is_interpreted_frame_valid(JavaThread* thread) const; // performs sanity checks on interpreted frames.
// Deoptimization support
bool should_be_deoptimized() const;
bool can_be_deoptimized() const;
int frame_size(RegisterMap* map) const;
// Walking to the caller ("sender") frame
frame sender(RegisterMap* map) const;
frame profile_find_Java_sender_frame(JavaThread *thread);
bool safe_for_sender(JavaThread *thread);
frame real_sender(RegisterMap* map) const;
frame java_sender() const;
private:
// Per-frame-kind sender computation helpers
frame sender_for_compiled_frame(RegisterMap* map) const;
frame sender_for_entry_frame(RegisterMap* map) const;
frame sender_for_interpreter_frame(RegisterMap* map) const;
frame sender_for_native_frame(RegisterMap* map) const;
bool is_entry_frame_valid(JavaThread* thread) const;
public:
// Word/oop access at fp-relative offsets
intptr_t* addr_at(int index) const { return &fp()[index]; }
intptr_t at(int index) const { return *addr_at(index); }
oop obj_at(int offset) const { return *obj_at_addr(offset); }
void obj_at_put(int offset, oop value) { *obj_at_addr(offset) = value; }
jint int_at(int offset) const { return *int_at_addr(offset); }
void int_at_put(int offset, jint value) { *int_at_addr(offset) = value; }
oop* obj_at_addr(int offset) const { return (oop*) addr_at(offset); }
oop* adjusted_obj_at_addr(Method* method, int index) { return obj_at_addr(adjust_offset(method, index)); }
private:
jint* int_at_addr(int offset) const { return (jint*) addr_at(offset); }
public:
// Link (saved fp), return pc, and stack-pointer views
intptr_t* link() const;
void set_link(intptr_t* addr);
address sender_pc() const;
void deoptimize(JavaThread* thread);
intptr_t* unextended_sp() const;
intptr_t* sender_sp() const;
intptr_t* real_fp() const;
intptr_t *initial_deoptimization_info();
private:
// Raw interpreter-frame slot addresses
intptr_t** interpreter_frame_locals_addr() const;
intptr_t* interpreter_frame_bcx_addr() const;
intptr_t* interpreter_frame_mdx_addr() const;
public:
// Interpreter frame: locals, bcx/bci/bcp, mdx/mdp accessors
intptr_t* interpreter_frame_local_at(int index) const;
void interpreter_frame_set_locals(intptr_t* locs);
intptr_t interpreter_frame_bcx() const { return *interpreter_frame_bcx_addr(); }
void interpreter_frame_set_bcx(intptr_t bcx);
jint interpreter_frame_bci() const;
void interpreter_frame_set_bci(jint bci);
address interpreter_frame_bcp() const;
void interpreter_frame_set_bcp(address bcp);
intptr_t interpreter_frame_mdx() const { return *interpreter_frame_mdx_addr(); }
void interpreter_frame_set_mdx(intptr_t mdx);
address interpreter_frame_mdp() const;
void interpreter_frame_set_mdp(address dp);
// Receiver and native-wrapper accessors
oop retrieve_receiver(RegisterMap *reg_map);
BasicLock* get_native_monitor();
oop get_native_receiver();
oop interpreter_callee_receiver(Symbol* signature) { return *interpreter_callee_receiver_addr(signature); }
oop* interpreter_callee_receiver_addr(Symbol* signature);
public:
// Interpreter frame: expression stack
intptr_t* interpreter_frame_expression_stack() const;
static jint interpreter_frame_expression_stack_direction();
intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
intptr_t* interpreter_frame_tos_at(jint offset) const;
intptr_t* interpreter_frame_tos_address() const;
jint interpreter_frame_expression_stack_size() const;
intptr_t* interpreter_frame_sender_sp() const;
#ifndef CC_INTERP
void set_interpreter_frame_sender_sp(intptr_t* sender_sp);
void interpreter_frame_set_monitor_end(BasicObjectLock* value);
#endif // CC_INTERP
// Interpreter frame: temp oop slot and monitor area
oop* interpreter_frame_temp_oop_addr() const;
BasicObjectLock* interpreter_frame_monitor_begin() const;
BasicObjectLock* interpreter_frame_monitor_end() const;
BasicObjectLock* next_monitor_in_interpreter_frame(BasicObjectLock* current) const;
BasicObjectLock* previous_monitor_in_interpreter_frame(BasicObjectLock* current) const;
static int interpreter_frame_monitor_size();
void interpreter_frame_verify_monitor(BasicObjectLock* value) const;
bool interpreter_frame_equals_unpacked_fp(intptr_t* fp);
BasicType interpreter_frame_result(oop* oop_result, jvalue* value_result);
public:
// Interpreter frame: method and constant-pool cache
Method* interpreter_frame_method() const;
void interpreter_frame_set_method(Method* method);
Method** interpreter_frame_method_addr() const;
ConstantPoolCache** interpreter_frame_cache_addr() const;
public:
// Entry (call-stub) frame accessors
JavaCallWrapper* entry_frame_call_wrapper() const { return *entry_frame_call_wrapper_addr(); }
JavaCallWrapper* entry_frame_call_wrapper_if_safe(JavaThread* thread) const;
JavaCallWrapper** entry_frame_call_wrapper_addr() const;
intptr_t* entry_frame_argument_at(int offset) const;
bool entry_frame_is_first() const;
public:
// Compiler support: slot offset computations
static int local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
static int monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
static int min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors);
static bool volatile_across_calls(Register reg);
public:
// Saved oop result of a compiled call
oop saved_oop_result(RegisterMap* map) const;
void set_saved_oop_result(RegisterMap* map, oop obj);
private:
const char* print_name() const;
void describe_pd(FrameValues& values, int frame_no);
public:
// Printing / debugging
void print_value() const { print_value_on(tty,NULL); }
void print_value_on(outputStream* st, JavaThread *thread) const;
void print_on(outputStream* st) const;
void interpreter_frame_print_on(outputStream* st) const;
void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
void describe(FrameValues& values, int frame_no);
// Oop iteration support
oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
private:
void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
void oops_entry_do(OopClosure* f, const RegisterMap* map);
void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
int adjust_offset(Method* method, int index); // helper for above fn
public:
// GC support
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
void nmethods_do(CodeBlobClosure* cf);
void metadata_do(void f(Metadata*));
void gc_prologue();
void gc_epilogue();
void pd_gc_epilog();
# ifdef ENABLE_ZAP_DEAD_LOCALS
private:
// Debug-only closures used when zapping dead locals
class CheckValueClosure: public OopClosure {
public:
void do_oop(oop* p);
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static CheckValueClosure _check_value;
class CheckOopClosure: public OopClosure {
public:
void do_oop(oop* p);
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static CheckOopClosure _check_oop;
static void check_derived_oop(oop* base, oop* derived);
class ZapDeadClosure: public OopClosure {
public:
void do_oop(oop* p);
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static ZapDeadClosure _zap_dead;
public:
void zap_dead_locals (JavaThread* thread, const RegisterMap* map);
void zap_dead_interpreted_locals(JavaThread* thread, const RegisterMap* map);
void zap_dead_compiled_locals (JavaThread* thread, const RegisterMap* map);
void zap_dead_entry_locals (JavaThread* thread, const RegisterMap* map);
void zap_dead_deoptimized_locals(JavaThread* thread, const RegisterMap* map);
# endif
// Verification
void verify(const RegisterMap* map);
static bool verify_return_pc(address x);
static bool is_bci(intptr_t bcx);
int pd_oop_map_offset_adjustment() const;
// Platform-dependent part of the frame layout
#ifdef TARGET_ARCH_x86
# include "frame_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "frame_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "frame_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "frame_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "frame_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "frame_ppc.hpp"
#endif
};
#ifndef PRODUCT
// One annotated stack location used for frame-layout dumps (non-product).
class FrameValue VALUE_OBJ_CLASS_SPEC {
public:
intptr_t* location;
char* description;
int owner; // frame number that reported this location, or -1 for markers
int priority; // orders multiple descriptions of the same location
};
// Collection of annotated stack locations; used to pretty-print and
// cross-check frame layouts (non-product only).
class FrameValues {
private:
GrowableArray<FrameValue> _values;
// Sort by address, breaking ties by priority.  Compare the pointers
// directly: the raw difference is a ptrdiff_t and narrowing it to int
// could overflow or flip sign for widely separated addresses.
static int compare(FrameValue* a, FrameValue* b) {
if (a->location == b->location) {
return a->priority - b->priority;
}
return (a->location > b->location) ? 1 : -1;
}
public:
// Record a description for 'location'; see FrameValue for field meanings.
void describe(int owner, intptr_t* location, const char* description, int priority = 0);
#ifdef ASSERT
void validate();
#endif
void print(JavaThread* thread);
};
#endif
// Iterates over all frames of a JavaThread, starting at the last Java frame:
//   for (StackFrameStream fst(thread); !fst.is_done(); fst.next()) { ... }
class StackFrameStream : public StackObj {
private:
frame _fr;
RegisterMap _reg_map;
bool _is_done;
public:
StackFrameStream(JavaThread *thread, bool update = true);
// Returns false while positioned on the oldest frame the first time it is
// queried (so that frame is still processed), and true on the next query.
bool is_done() { return (_is_done) ? true : (_is_done = _fr.is_first_frame(), false); }
void next() { if (!_is_done) _fr = _fr.sender(&_reg_map); }
frame *current() { return &_fr; }
RegisterMap* register_map() { return &_reg_map; }
};
#endif // SHARE_VM_RUNTIME_FRAME_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/frame.inline.hpp
#ifndef SHARE_VM_RUNTIME_FRAME_INLINE_HPP
#define SHARE_VM_RUNTIME_FRAME_INLINE_HPP
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/method.hpp"
#include "runtime/frame.hpp"
#include "runtime/signature.hpp"
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "jniTypes_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "jniTypes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "jniTypes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "jniTypes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "jniTypes_ppc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "entryFrame_zero.hpp"
# include "fakeStubFrame_zero.hpp"
# include "interpreterFrame_zero.hpp"
# include "sharkFrame_zero.hpp"
#endif
// True if bcx encodes a bytecode index rather than a bytecode pointer:
// small non-negative values are a bci, anything address-like is a bcp.
inline bool frame::is_bci(intptr_t bcx) {
#ifdef _LP64
// unsigned compare folds the 0 <= bcx check into a single test
return ((uintptr_t) bcx) <= ((uintptr_t) max_method_code_size) ;
#else
return 0 <= bcx && bcx <= max_method_code_size;
#endif
}
// An entry frame is one whose return pc points back into the call stub
// (i.e. a Java frame called from C).
inline bool frame::is_entry_frame() const {
return StubRoutines::returns_to_call_stub(pc());
}
// Stub frames: generated stub code or adapter blobs.
inline bool frame::is_stub_frame() const {
return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob());
}
// The oldest frame on the stack: an entry frame with no Java frames below it.
inline bool frame::is_first_frame() const {
return is_entry_frame() && entry_frame_is_first();
}
#ifdef CC_INTERP
// C++ interpreter only: address of the interpreter state's temporary oop slot.
inline oop* frame::interpreter_frame_temp_oop_addr() const {
interpreterState istate = get_interpreterState();
return (oop *)&istate->_oop_temp;
}
#endif // CC_INTERP
#ifdef TARGET_ARCH_x86
# include "frame_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "frame_aarch64.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "frame_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "frame_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "frame_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "frame_ppc.inline.hpp"
#endif
#endif // SHARE_VM_RUNTIME_FRAME_INLINE_HPP
C:\hotspot-69087d08d473\src\share\vm/runtime/globals.cpp
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
#include "utilities/top.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1_globals.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif
#ifdef SHARK
#include "shark/shark_globals.hpp"
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
RUNTIME_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \
MATERIALIZE_NOTPRODUCT_FLAG, \
MATERIALIZE_MANAGEABLE_FLAG, MATERIALIZE_PRODUCT_RW_FLAG, \
MATERIALIZE_LP64_PRODUCT_FLAG)
RUNTIME_OS_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, \
MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
ARCH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, \
MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, \
MATERIALIZE_NOTPRODUCT_FLAG)
MATERIALIZE_FLAGS_EXT
// True when this VM was compiled as a product (release) build.
static bool is_product_build() {
#ifdef PRODUCT
  const bool product_build = true;
#else
  const bool product_build = false;
#endif
  return product_build;
}
// Abort with a fatal error if this flag was compiled out as a constant and
// therefore cannot be written at runtime.
void Flag::check_writable() {
if (is_constant_in_binary()) {
fatal(err_msg("flag is constant: %s", _name));
}
}
// Type test and accessors for bool flags.  _type is the type-name string
// recorded in the flag table; _addr points at the flag's storage.
bool Flag::is_bool() const {
  return strcmp(_type, "bool") == 0;
}

bool Flag::get_bool() const {
  return *((bool*) _addr);
}

void Flag::set_bool(bool value) {
  check_writable();
  // Store the new value: the setter previously only checked writability and
  // silently dropped the update.
  *((bool*) _addr) = value;
}
// Type test and accessors for intx flags.
bool Flag::is_intx() const {
  return strcmp(_type, "intx") == 0;
}

intx Flag::get_intx() const {
  return *((intx*) _addr);
}

void Flag::set_intx(intx value) {
  check_writable();
  // Store the new value: the setter previously never wrote it.
  *((intx*) _addr) = value;
}
// Type test and accessors for uintx flags.
bool Flag::is_uintx() const {
  return strcmp(_type, "uintx") == 0;
}

uintx Flag::get_uintx() const {
  return *((uintx*) _addr);
}

void Flag::set_uintx(uintx value) {
  check_writable();
  // Store the new value: the setter previously never wrote it.
  *((uintx*) _addr) = value;
}
// Type test and accessors for uint64_t flags.
bool Flag::is_uint64_t() const {
  return strcmp(_type, "uint64_t") == 0;
}

uint64_t Flag::get_uint64_t() const {
  return *((uint64_t*) _addr);
}

void Flag::set_uint64_t(uint64_t value) {
  check_writable();
  // Store the new value: the setter previously never wrote it.
  *((uint64_t*) _addr) = value;
}
// Type test and accessors for double flags.
bool Flag::is_double() const {
  return strcmp(_type, "double") == 0;
}

double Flag::get_double() const {
  return *((double*) _addr);
}

void Flag::set_double(double value) {
  check_writable();
  // Store the new value: the setter previously never wrote it.
  *((double*) _addr) = value;
}
// Type test and accessors for ccstr / ccstrlist flags.  A ccstrlist flag
// accumulates values rather than replacing them.
bool Flag::is_ccstr() const {
  return strcmp(_type, "ccstr") == 0 || strcmp(_type, "ccstrlist") == 0;
}

bool Flag::ccstr_accumulates() const {
  return strcmp(_type, "ccstrlist") == 0;
}

ccstr Flag::get_ccstr() const {
  return *((ccstr*) _addr);
}

void Flag::set_ccstr(ccstr value) {
  check_writable();
  // Store the new string pointer: the setter previously never wrote it.
  // NOTE(review): freeing any previous string is left to the caller, as the
  // getter returns the raw stored pointer.
  *((ccstr*) _addr) = value;
}
// The flag's origin (how its current value was set) is stored in the
// VALUE_ORIGIN_MASK bits of _flags.
Flag::Flags Flag::get_origin() {
return Flags(_flags & VALUE_ORIGIN_MASK);
}
void Flag::set_origin(Flags origin) {
assert((origin & VALUE_ORIGIN_MASK) == origin, "sanity");
_flags = Flags((_flags & ~VALUE_ORIGIN_MASK) | origin);
}
// Origin predicates: where did this flag's current value come from?
bool Flag::is_default() {
return (get_origin() == DEFAULT);
}
bool Flag::is_ergonomic() {
return (get_origin() == ERGONOMIC);
}
bool Flag::is_command_line() {
return (get_origin() == COMMAND_LINE);
}
// Kind predicates: each KIND_* bit in _flags records how the flag was
// declared in the flag tables.
bool Flag::is_product() const {
return (_flags & KIND_PRODUCT) != 0;
}
bool Flag::is_manageable() const {
return (_flags & KIND_MANAGEABLE) != 0;
}
bool Flag::is_diagnostic() const {
return (_flags & KIND_DIAGNOSTIC) != 0;
}
bool Flag::is_experimental() const {
return (_flags & KIND_EXPERIMENTAL) != 0;
}
bool Flag::is_notproduct() const {
return (_flags & KIND_NOT_PRODUCT) != 0;
}
bool Flag::is_develop() const {
return (_flags & KIND_DEVELOP) != 0;
}
bool Flag::is_read_write() const {
return (_flags & KIND_READ_WRITE) != 0;
}
bool Flag::is_commercial() const {
return (_flags & KIND_COMMERCIAL) != 0;
}
// In product builds, notproduct and develop flags are compiled to constants
// and cannot be changed at runtime.
bool Flag::is_constant_in_binary() const {
#ifdef PRODUCT
return is_notproduct() || is_develop();
#else
return false;
#endif
}
// True for the flags that themselves unlock other (diagnostic/experimental)
// flags.
bool Flag::is_unlocker() const {
return strcmp(_name, "UnlockDiagnosticVMOptions") == 0 ||
strcmp(_name, "UnlockExperimentalVMOptions") == 0 ||
is_unlocker_ext();
}
// A diagnostic/experimental flag is usable only when its corresponding
// Unlock* flag is set; other flags defer to the extension hook.
bool Flag::is_unlocked() const {
if (is_diagnostic()) {
return UnlockDiagnosticVMOptions;
}
if (is_experimental()) {
return UnlockExperimentalVMOptions;
}
return is_unlocked_ext();
}
// Strip the diagnostic bit so this flag no longer requires
// UnlockDiagnosticVMOptions.
void Flag::unlock_diagnostic() {
assert(is_diagnostic(), "sanity");
_flags = Flags(_flags & ~KIND_DIAGNOSTIC);
}
// Fill 'buf' with an explanation of why this flag cannot currently be set,
// or leave it empty if the flag is available.  Checks locked diagnostic and
// experimental flags, then develop/notproduct flags in product builds, and
// finally defers to the extension hook.
void Flag::get_locked_message(char* buf, int buflen) const {
buf[0] = '\0';
if (is_diagnostic() && !is_unlocked()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
_name);
return;
}
if (is_experimental() && !is_unlocked()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
_name);
return;
}
if (is_develop() && is_product_build()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
_name);
return;
}
if (is_notproduct() && is_product_build()) {
jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
_name);
return;
}
get_locked_message_ext(buf, buflen);
}
// Writable at runtime: manageable flags, product read-write flags, or
// whatever the extension hook additionally allows.
bool Flag::is_writeable() const {
  return is_manageable() || (is_product() && is_read_write()) || is_writeable_ext();
}
// Visible through external interfaces (e.g. management APIs).
bool Flag::is_external() const {
  return is_manageable() || is_external_ext();
}
// Scratch size for the per-line "%.Ns" format string built below.
#define FORMAT_BUFFER_LEN 16
PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
// Prints one flag as "   type name := value {kinds}" (':' marks a
// non-default value); ccstr values containing newlines are continued on
// "name +=" lines. Constant-in-binary flags are skipped entirely.
void Flag::print_on(outputStream* st, bool withComments) {
  if (is_constant_in_binary()) {
    return;
  }
  st->print("%9s %-40s %c= ", _type, _name, (!is_default() ? ':' : ' '));
  if (is_bool()) {
    st->print("%-16s", get_bool() ? "true" : "false");
  }
  // NOTE(review): "%-16ld"/"%-16lu" assume long is as wide as intx/uintx;
  // on LLP64 targets (64-bit Windows) long is 32 bits, so large values
  // would be misprinted — consider INTX_FORMAT_W-style macros; confirm.
  if (is_intx()) {
    st->print("%-16ld", get_intx());
  }
  if (is_uintx()) {
    st->print("%-16lu", get_uintx());
  }
  if (is_uint64_t()) {
    st->print("%-16lu", get_uint64_t());
  }
  if (is_double()) {
    st->print("%-16f", get_double());
  }
  if (is_ccstr()) {
    const char* cp = get_ccstr();
    if (cp != NULL) {
      const char* eol;
      // Print each embedded line separately; a "%.Ns" format is built so
      // only the segment before the newline is emitted.
      while ((eol = strchr(cp, '\n')) != NULL) {
        char format_buffer[FORMAT_BUFFER_LEN];
        size_t llen = pointer_delta(eol, cp, sizeof(char));
        jio_snprintf(format_buffer, FORMAT_BUFFER_LEN,
                     "%%." SIZE_FORMAT "s", llen);
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
        st->print(format_buffer, cp);
PRAGMA_DIAG_POP
        st->cr();
        cp = eol+1;
        // Continuation lines repeat the name with "+=".
        st->print("%5s %-35s += ", "", _name);
      }
      st->print("%-16s", cp);
    }
    else st->print("%-16s", "");
  }
  st->print("%-20s", " ");
  print_kind(st);
  if (withComments) {
#ifndef PRODUCT
    // Flag documentation strings exist only in non-product builds.
    st->print("%s", _doc);
#endif
  }
  st->cr();
}
// Prints the flag's kind bits as "{kind kind ...}"; prints nothing at all
// when no kind bit is set.
void Flag::print_kind(outputStream* st) {
  // Table mapping each KIND_* bit to its printable label; terminated by
  // a sentinel entry with bit == -1.
  struct KindName {
    int bit;
    const char* label;
  };
  static const KindName kind_names[] = {
    { KIND_C1, "C1" },
    { KIND_C2, "C2" },
    { KIND_ARCH, "ARCH" },
    { KIND_SHARK, "SHARK" },
    { KIND_PLATFORM_DEPENDENT, "pd" },
    { KIND_PRODUCT, "product" },
    { KIND_MANAGEABLE, "manageable" },
    { KIND_DIAGNOSTIC, "diagnostic" },
    { KIND_EXPERIMENTAL, "experimental" },
    { KIND_COMMERCIAL, "commercial" },
    { KIND_NOT_PRODUCT, "notproduct" },
    { KIND_DEVELOP, "develop" },
    { KIND_LP64_PRODUCT, "lp64_product" },
    { KIND_READ_WRITE, "rw" },
    { -1, "" }
  };

  if ((_flags & KIND_MASK) == 0) {
    return;
  }
  st->print("{");
  const char* sep = "";  // empty before the first label, " " afterwards
  for (int i = 0; kind_names[i].bit != -1; i++) {
    if ((_flags & kind_names[i].bit) != 0) {
      st->print("%s", sep);
      st->print("%s", kind_names[i].label);
      sep = " ";
    }
  }
  st->print("}");
}
// Prints the flag in command-line form, e.g. "-XX:+Name" or "-XX:Name=v".
// For ccstr values, each embedded newline starts a fresh "-XX:Name="
// occurrence so the output can be pasted back onto a command line.
void Flag::print_as_flag(outputStream* st) {
  if (is_bool()) {
    st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
  } else if (is_intx()) {
    st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
  } else if (is_uintx()) {
    st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
  } else if (is_uint64_t()) {
    st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
  } else if (is_double()) {
    st->print("-XX:%s=%f", _name, get_double());
  } else if (is_ccstr()) {
    st->print("-XX:%s=", _name);
    const char* cp = get_ccstr();
    if (cp != NULL) {
      // Copy character by character so newlines can be rewritten.
      for (; *cp != '\0'; cp += 1) {
        switch (*cp) {
          default:
            st->print("%c", *cp);
            break;
          case '\n':
            st->print(" -XX:%s=", _name);
            break;
        }
      }
    }
  } else {
    ShouldNotReachHere();
  }
}
#define NAME(name) NOT_PRODUCT(&name) PRODUCT_ONLY(&CONST_##name)
#define RUNTIME_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT) },
#define RUNTIME_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DIAGNOSTIC) },
#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_EXPERIMENTAL) },
#define RUNTIME_MANAGEABLE_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_MANAGEABLE) },
#define RUNTIME_PRODUCT_RW_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_READ_WRITE) },
#define RUNTIME_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP) },
#define RUNTIME_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
#define RUNTIME_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_NOT_PRODUCT) },
#ifdef _LP64
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_LP64_PRODUCT) },
#else
#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
#endif // _LP64
#define C1_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT) },
#define C1_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
#define C1_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DIAGNOSTIC) },
#define C1_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP) },
#define C1_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
#define C1_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_NOT_PRODUCT) },
#define C2_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT) },
#define C2_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
#define C2_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DIAGNOSTIC) },
#define C2_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_EXPERIMENTAL) },
#define C2_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP) },
#define C2_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
#define C2_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_NOT_PRODUCT) },
#define ARCH_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_PRODUCT) },
#define ARCH_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DIAGNOSTIC) },
#define ARCH_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_EXPERIMENTAL) },
#define ARCH_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DEVELOP) },
#define ARCH_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_NOT_PRODUCT) },
#define SHARK_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT) },
#define SHARK_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
#define SHARK_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DIAGNOSTIC) },
#define SHARK_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP) },
#define SHARK_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
#define SHARK_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), NAME(name), NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_SHARK | Flag::KIND_NOT_PRODUCT) },
// The global flag table: one Flag entry per declared VM option, expanded
// from the per-category *_FLAG_STRUCT macros above. The table is
// NULL-name terminated (see the sentinel entry at the end).
static Flag flagTable[] = {
 RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
 RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT)
#if INCLUDE_ALL_GCS
 G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER1
 C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
#endif
#ifdef COMPILER2
 C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
#endif
#ifdef SHARK
 SHARK_FLAGS(SHARK_DEVELOP_FLAG_STRUCT, SHARK_PD_DEVELOP_FLAG_STRUCT, SHARK_PRODUCT_FLAG_STRUCT, SHARK_PD_PRODUCT_FLAG_STRUCT, SHARK_DIAGNOSTIC_FLAG_STRUCT, SHARK_NOTPRODUCT_FLAG_STRUCT)
#endif
 ARCH_FLAGS(ARCH_DEVELOP_FLAG_STRUCT, ARCH_PRODUCT_FLAG_STRUCT, ARCH_DIAGNOSTIC_FLAG_STRUCT, ARCH_EXPERIMENTAL_FLAG_STRUCT, ARCH_NOTPRODUCT_FLAG_STRUCT)
 FLAGTABLE_EXT
 {0, NULL, NULL}
};

// Static accessors declared in struct Flag; numFlags counts the sentinel.
Flag* Flag::flags = flagTable;
size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag));
// Returns true iff the NUL-terminated string s has exactly len characters
// and those characters match the first len characters of q (q need not be
// NUL-terminated at len).
inline bool str_equal(const char* s, const char* q, size_t len) {
  // Compare lengths as size_t. The previous code cast len to unsigned int,
  // which would truncate on LP64 platforms where size_t is 64 bits.
  if (strlen(s) != len) return false;
  return strncmp(s, q, len) == 0;
}
// Linear search of flagTable for a flag whose name matches (name, length).
// Constant-in-binary flags are only surfaced when return_flag is true;
// locked (diagnostic/experimental) flags are hidden unless allow_locked
// or the flag is itself one of the unlock options.
Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
  for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
    if (str_equal(current->_name, name, length)) {
      // Found but compiled out: report it only when explicitly requested.
      if (current->is_constant_in_binary()) {
        return (return_flag == true ? current : NULL);
      }
      if (!(current->is_unlocked() || current->is_unlocker())) {
        if (!allow_locked) {
          return NULL;
        }
      }
      return current;
    }
  }
  // No name matched.
  return NULL;
}
// Crude bigram-overlap similarity between the NUL-terminated str1 and the
// first len2 characters of str2. Each adjacent character pair of str1
// scores one hit if the same pair occurs anywhere in str2; the result is
// 2*hits/(len1+len2), so identical short strings score near 1.0 and
// disjoint strings score 0.0.
static float str_similar(const char* str1, const char* str2, size_t len2) {
  int n1 = (int) strlen(str1);
  int denom = n1 + (int) len2;
  int matches = 0;
  for (int i = 0; i + 1 < n1; ++i) {
    // Scan str2 for this bigram of str1; count at most one hit per bigram.
    for (int j = 0; j + 1 < (int) len2; ++j) {
      if (str1[i] == str2[j] && str1[i + 1] == str2[j + 1]) {
        ++matches;
        break;
      }
    }
  }
  return 2.0f * (float) matches / (float) denom;
}
// Finds the flag whose name is most similar to the (possibly misspelled)
// input, for "did you mean" diagnostics. Returns NULL when the best match
// is locked (unless allow_locked) or scores below the similarity cutoff.
Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
  float VMOptionsFuzzyMatchSimilarity = 0.7f;
  Flag* match = NULL;
  float score;
  float max_score = -1;
  // Scan the whole table keeping the highest-scoring entry.
  for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
    score = str_similar(current->_name, name, length);
    if (score > max_score) {
      max_score = score;
      match = current;
    }
  }
  // NOTE(review): match is dereferenced unconditionally here — this relies
  // on flagTable containing at least one real (non-sentinel) entry, which
  // the RUNTIME_FLAGS expansion guarantees in practice.
  if (!(match->is_unlocked() || match->is_unlocker())) {
    if (!allow_locked) {
      return NULL;
    }
  }
  if (max_score < VMOptionsFuzzyMatchSimilarity) {
    return NULL;
  }
  return match;
}
// Maps a typed flag enum index directly to its table entry (the enum
// values are generated in table order).
static Flag* address_of_flag(CommandLineFlagWithType flag) {
  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
  return &Flag::flags[flag];
}
// Origin queries addressed by enum index rather than by name.

// True when the flag still holds its build-time default.
bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
  Flag* f = &Flag::flags[flag];
  return f->is_default();
}
// True when the flag was set ergonomically by the VM itself.
bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
  Flag* f = &Flag::flags[flag];
  return f->is_ergonomic();
}
// True when the flag was set explicitly on the command line.
bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
  assert((size_t)flag < Flag::numFlags, "bad command line flag index");
  Flag* f = &Flag::flags[flag];
  return f->is_command_line();
}
// Looks up the named flag and reports through *value whether it was set
// on the command line. Returns false (leaving *value untouched) when no
// such flag exists.
bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
  Flag* result = Flag::find_flag((char*)name, strlen(name));
  if (result == NULL) return false;
  // Bug fix: the out-parameter was never written, so callers always saw
  // whatever garbage *value held before the call.
  *value = result->is_command_line();
  return true;
}
// Emits an event of type E recording a flag-value change. E is expected to
// provide set_name/set_oldValue/set_newValue/set_origin/commit (the JFR
// Event*FlagChanged types used by the callers below do).
template<class E, class T>
static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin)
{
  E e;
  e.set_name(name);
  e.set_oldValue(old_value);
  e.set_newValue(new_value);
  e.set_origin(origin);
  e.commit();
}
// Reads the named bool flag into *value. Returns false (leaving *value
// untouched) when the flag does not exist, is locked, or is not a bool.
bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_bool()) return false;
  // Bug fix: the out-parameter was never written, so this "getter" never
  // actually delivered the flag's value.
  *value = result->get_bool();
  return true;
}
// Stores *value into the named bool flag and swaps the previous value back
// out through *value (in/out parameter), recording a change event and the
// new origin. Returns false when the flag is missing or not a bool.
bool CommandLineFlags::boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_bool()) return false;
  bool old_value = result->get_bool();
  trace_flag_changed<EventBooleanFlagChanged, bool>(name, old_value, *value, origin);
  result->set_bool(*value);
  // Bug fix: return the previous value to the caller — the AtPut family is
  // a swap (see ccstrAtPut, which heap-copies old_value for this purpose).
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed setter addressed by enum index: guarantees the flag exists and is
// a bool, records a change event, then stores the value and its origin.
void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
  trace_flag_changed<EventBooleanFlagChanged, bool>(faddr->_name, faddr->get_bool(), value, origin);
  faddr->set_bool(value);
  faddr->set_origin(origin);
}
// Reads the named intx flag into *value. Returns false (leaving *value
// untouched) when the flag does not exist, is locked, or is not an intx.
bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_intx()) return false;
  // Bug fix: the out-parameter was never written.
  *value = result->get_intx();
  return true;
}
// Stores *value into the named intx flag and swaps the previous value back
// out through *value, recording a change event and the new origin.
bool CommandLineFlags::intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_intx()) return false;
  intx old_value = result->get_intx();
  trace_flag_changed<EventLongFlagChanged, s8>(name, old_value, *value, origin);
  result->set_intx(*value);
  // Bug fix: hand the previous value back to the caller (swap contract,
  // consistent with the rest of the AtPut family).
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed setter addressed by enum index: guarantees the flag exists and is
// an intx, records a change event, then stores the value and its origin.
void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
  trace_flag_changed<EventLongFlagChanged, s8>(faddr->_name, faddr->get_intx(), value, origin);
  faddr->set_intx(value);
  faddr->set_origin(origin);
}
// Reads the named uintx flag into *value. Returns false (leaving *value
// untouched) when the flag does not exist, is locked, or is not a uintx.
bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_uintx()) return false;
  // Bug fix: the out-parameter was never written.
  *value = result->get_uintx();
  return true;
}
// Stores *value into the named uintx flag and swaps the previous value
// back out through *value, recording a change event and the new origin.
bool CommandLineFlags::uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_uintx()) return false;
  uintx old_value = result->get_uintx();
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
  result->set_uintx(*value);
  // Bug fix: hand the previous value back to the caller (swap contract).
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed setter addressed by enum index: guarantees the flag exists and is
// a uintx, records a change event, then stores the value and its origin.
void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uintx(), value, origin);
  faddr->set_uintx(value);
  faddr->set_origin(origin);
}
// Reads the named uint64_t flag into *value. Returns false (leaving
// *value untouched) when the flag is missing, locked, or mistyped.
bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_uint64_t()) return false;
  // Bug fix: the out-parameter was never written.
  *value = result->get_uint64_t();
  return true;
}
// Stores *value into the named uint64_t flag and swaps the previous value
// back out through *value, recording a change event and the new origin.
bool CommandLineFlags::uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_uint64_t()) return false;
  uint64_t old_value = result->get_uint64_t();
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
  result->set_uint64_t(*value);
  // Bug fix: hand the previous value back to the caller (swap contract).
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed setter addressed by enum index: guarantees the flag exists and is
// a uint64_t, records a change event, then stores the value and origin.
void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uint64_t(), value, origin);
  faddr->set_uint64_t(value);
  faddr->set_origin(origin);
}
// Reads the named double flag into *value. Returns false (leaving *value
// untouched) when the flag is missing, locked, or mistyped.
bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_double()) return false;
  // Bug fix: the out-parameter was never written.
  *value = result->get_double();
  return true;
}
// Stores *value into the named double flag and swaps the previous value
// back out through *value, recording a change event and the new origin.
bool CommandLineFlags::doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_double()) return false;
  double old_value = result->get_double();
  trace_flag_changed<EventDoubleFlagChanged, double>(name, old_value, *value, origin);
  result->set_double(*value);
  // Bug fix: hand the previous value back to the caller (swap contract).
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed setter addressed by enum index: guarantees the flag exists and is
// a double, records a change event, then stores the value and its origin.
void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
  trace_flag_changed<EventDoubleFlagChanged, double>(faddr->_name, faddr->get_double(), value, origin);
  faddr->set_double(value);
  faddr->set_origin(origin);
}
// Reads the named ccstr flag into *value (no copy is made; the pointer is
// the flag's current string). Returns false (leaving *value untouched)
// when the flag is missing, locked, or mistyped.
bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
  if (result == NULL) return false;
  if (!result->is_ccstr()) return false;
  // Bug fix: the out-parameter was never written.
  *value = result->get_ccstr();
  return true;
}
// Stores a heap copy of *value into the named ccstr flag and returns the
// previous value through *value. The caller owns the returned string and
// is responsible for freeing it.
bool CommandLineFlags::ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin) {
  Flag* result = Flag::find_flag(name, len);
  if (result == NULL) return false;
  if (!result->is_ccstr()) return false;
  ccstr old_value = result->get_ccstr();
  trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
  char* new_value = NULL;
  if (*value != NULL) {
    // The flag keeps its own heap copy of the incoming string.
    new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
    strcpy(new_value, *value);
  }
  result->set_ccstr(new_value);
  if (result->is_default() && old_value != NULL) {
    // Prior value is the build-time default (not heap allocated): hand the
    // caller a heap copy so whatever comes back is uniformly freeable.
    char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal);
    strcpy(old_value_to_free, old_value);
    old_value = old_value_to_free;
  }
  // Bug fix: return the previous value to the caller. Without this line
  // the heap copy made just above is leaked immediately and callers can
  // never reclaim a previously-put string.
  *value = old_value;
  result->set_origin(origin);
  return true;
}
// Typed ccstr setter addressed by enum index. The flag keeps a heap copy
// of the incoming string; a non-default prior value was heap-allocated by
// an earlier put and is freed here (the build-time default is not).
void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
  Flag* faddr = address_of_flag(flag);
  guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
  ccstr old_value = faddr->get_ccstr();
  trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
  char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
  strcpy(new_value, value);
  faddr->set_ccstr(new_value);
  // is_default() must be tested before set_origin updates the origin.
  if (!faddr->is_default() && old_value != NULL) {
    FREE_C_HEAP_ARRAY(char, old_value, mtInternal);
  }
  faddr->set_origin(origin);
}
// qsort comparator ordering Flag* entries alphabetically by name;
// extern "C" so the function type matches what qsort expects.
extern "C" {
  static int compare_flags(const void* void_a, const void* void_b) {
    return strcmp((*((Flag**) void_a))->_name, (*((Flag**) void_b))->_name);
  }
}
// Prints, in alphabetical order, every flag whose value no longer has its
// default origin (i.e. was set somewhere), in command-line form.
void CommandLineFlags::printSetFlags(outputStream* out) {
  // numFlags counts the NULL sentinel; exclude it.
  const size_t length = Flag::numFlags - 1;
  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
  for (size_t i = 0; i < length; i++) {
    array[i] = &flagTable[i];
  }
  qsort(array, length, sizeof(Flag*), compare_flags);
  for (size_t i = 0; i < length; i++) {
    // Non-zero origin means the flag was set (DEFAULT == 0).
    if (array[i]->get_origin() /* naked field! */) {
      array[i]->print_as_flag(out);
      out->print(" ");
    }
  }
  out->cr();
  FREE_C_HEAP_ARRAY(Flag*, array, mtInternal);
}
#ifndef PRODUCT
// Debug-build-only consistency check over the full set of flag values.
void CommandLineFlags::verify() {
  assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
}
#endif // PRODUCT
// Prints every unlocked flag in alphabetical order (the -XX:+PrintFlagsFinal
// style listing), optionally with the flag documentation strings.
void CommandLineFlags::printFlags(outputStream* out, bool withComments) {
  // numFlags counts the NULL sentinel; exclude it.
  const size_t length = Flag::numFlags - 1;
  Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal);
  for (size_t i = 0; i < length; i++) {
    array[i] = &flagTable[i];
  }
  qsort(array, length, sizeof(Flag*), compare_flags);
  out->print_cr("[Global flags]");
  for (size_t i = 0; i < length; i++) {
    // Locked diagnostic/experimental flags are omitted from the listing.
    if (array[i]->is_unlocked()) {
      array[i]->print_on(out, withComments);
    }
  }
  FREE_C_HEAP_ARRAY(Flag*, array, mtInternal);
}
// ===== File boundary: src/share/vm/runtime/globals.hpp =====
#ifndef SHARE_VM_RUNTIME_GLOBALS_HPP
#define SHARE_VM_RUNTIME_GLOBALS_HPP
#include "utilities/debug.hpp"
#ifdef TIERED
#define trueInTiered true
#define falseInTiered false
#else
#define trueInTiered false
#define falseInTiered true
#endif
#ifdef TARGET_ARCH_x86
# include "globals_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "globals_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "globals_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "globals_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "globals_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "globals_ppc.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "globals_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "globals_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "globals_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "globals_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "globals_bsd.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "globals_linux_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_aarch64
# include "globals_linux_aarch64.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "globals_linux_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "globals_linux_zero.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "globals_solaris_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "globals_solaris_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "globals_windows_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "globals_linux_arm.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "globals_linux_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "globals_aix_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "globals_bsd_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "globals_bsd_zero.hpp"
#endif
#ifdef COMPILER1
#ifdef TARGET_ARCH_x86
# include "c1_globals_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_globals_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_globals_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_globals_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_globals_ppc.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "c1_globals_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "c1_globals_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "c1_globals_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "c1_globals_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "c1_globals_bsd.hpp"
#endif
#endif
#ifdef COMPILER2
#ifdef TARGET_ARCH_x86
# include "c2_globals_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c2_globals_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c2_globals_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c2_globals_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c2_globals_ppc.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "c2_globals_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "c2_globals_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "c2_globals_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "c2_globals_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "c2_globals_bsd.hpp"
#endif
#endif
#ifdef SHARK
#ifdef TARGET_ARCH_zero
# include "shark_globals_zero.hpp"
#endif
#endif
// Interpreter-only build (no C1, C2, or Shark): supply the platform-
// dependent constants that a compiler backend's globals header would
// normally define, with values appropriate for a non-compiling VM.
#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
define_pd_global(bool, BackgroundCompilation, false);
define_pd_global(bool, UseTLAB, false);
define_pd_global(bool, CICompileOSR, false);
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, UseOnStackReplacement, false);
define_pd_global(bool, InlineIntrinsics, false);
define_pd_global(bool, PreferInterpreterNativeStubs, true);
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 0);
define_pd_global(intx, BackEdgeThreshold, 0);
define_pd_global(intx, OnStackReplacePercentage, 0);
define_pd_global(bool, ResizeTLAB, false);
define_pd_global(intx, FreqInlineSize, 0);
define_pd_global(intx, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, InlineClassNatives, true);
define_pd_global(intx, InlineUnsafeOps, true);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
define_pd_global(intx, CodeCacheMinBlockLength, 1);
define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
#define CI_COMPILER_COUNT 0
#else
// Number of compiler threads' worth of compilers configured in.
#ifdef COMPILER2
#define CI_COMPILER_COUNT 2
#else
#define CI_COMPILER_COUNT 1
#endif // COMPILER2
#endif // no compilers
// Without JFR support the LogJFR flag degenerates to a constant false.
#if !INCLUDE_JFR
#define LogJFR false
#endif
// Read-only C string; the flag machinery may still reallocate the storage.
typedef const char* ccstr;
typedef const char* ccstrlist; // represents string arguments which accumulate

// One entry in the global flag table: the flag's type name, option name,
// address of its storage cell, (non-product) documentation, and a packed
// word combining the value's origin with KIND_* attribute bits.
struct Flag {
  enum Flags {
    // Low VALUE_ORIGIN_BITS bits: where the current value came from.
    DEFAULT          = 0,
    COMMAND_LINE     = 1,
    ENVIRON_VAR      = 2,
    CONFIG_FILE      = 3,
    MANAGEMENT       = 4,
    ERGONOMIC        = 5,
    ATTACH_ON_DEMAND = 6,
    INTERNAL         = 7,
    LAST_VALUE_ORIGIN = INTERNAL,
    VALUE_ORIGIN_BITS = 4,
    VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS),
    // Remaining bits: flag kind/attributes (combinable).
    KIND_PRODUCT            = 1 << 4,
    KIND_MANAGEABLE         = 1 << 5,
    KIND_DIAGNOSTIC         = 1 << 6,
    KIND_EXPERIMENTAL       = 1 << 7,
    KIND_NOT_PRODUCT        = 1 << 8,
    KIND_DEVELOP            = 1 << 9,
    KIND_PLATFORM_DEPENDENT = 1 << 10,
    KIND_READ_WRITE         = 1 << 11,
    KIND_C1                 = 1 << 12,
    KIND_C2                 = 1 << 13,
    KIND_ARCH               = 1 << 14,
    KIND_SHARK              = 1 << 15,
    KIND_LP64_PRODUCT       = 1 << 16,
    KIND_COMMERCIAL         = 1 << 17,
    KIND_MASK = ~VALUE_ORIGIN_MASK
  };
  const char* _type;              // type name, e.g. "bool", "intx"
  const char* _name;              // option name as written on the command line
  void* _addr;                    // address of the flag's storage cell
  NOT_PRODUCT(const char* _doc;)  // documentation string (debug builds only)
  Flags _flags;                   // origin (low bits) | KIND_* attribute bits
  // The global flag table and its length (including the NULL sentinel).
  static Flag* flags;
  static size_t numFlags;
  // Lookup by exact name and by fuzzy similarity (for typo suggestions).
  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
  static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
  void check_writable();
  // Typed access: is_X() tests _type, get_X()/set_X() read/write _addr.
  bool is_bool() const;
  bool get_bool() const;
  void set_bool(bool value);
  bool is_intx() const;
  intx get_intx() const;
  void set_intx(intx value);
  bool is_uintx() const;
  uintx get_uintx() const;
  void set_uintx(uintx value);
  bool is_uint64_t() const;
  uint64_t get_uint64_t() const;
  void set_uint64_t(uint64_t value);
  bool is_double() const;
  double get_double() const;
  void set_double(double value);
  bool is_ccstr() const;
  bool ccstr_accumulates() const;
  ccstr get_ccstr() const;
  void set_ccstr(ccstr value);
  // Origin handling (the low VALUE_ORIGIN_BITS of _flags).
  Flags get_origin();
  void set_origin(Flags origin);
  bool is_default();
  bool is_ergonomic();
  bool is_command_line();
  // KIND_* attribute predicates; _ext variants hook closed-source builds.
  bool is_product() const;
  bool is_manageable() const;
  bool is_diagnostic() const;
  bool is_experimental() const;
  bool is_notproduct() const;
  bool is_develop() const;
  bool is_read_write() const;
  bool is_commercial() const;
  bool is_constant_in_binary() const;
  bool is_unlocker() const;
  bool is_unlocked() const;
  bool is_writeable() const;
  bool is_external() const;
  bool is_unlocker_ext() const;
  bool is_unlocked_ext() const;
  bool is_writeable_ext() const;
  bool is_external_ext() const;
  void unlock_diagnostic();
  void get_locked_message(char*, int) const;
  void get_locked_message_ext(char*, int) const;
  // Printing helpers used by -XX:+PrintFlagsFinal and friends.
  void print_on(outputStream* st, bool withComments = false );
  void print_kind(outputStream* st);
  void print_as_flag(outputStream* st);
};
// RAII guard: overrides a bool flag for the lifetime of this object and
// restores the original value on destruction.
class FlagSetting {
  bool val;     // saved original value
  bool* flag;   // flag being overridden
 public:
  FlagSetting(bool& fl, bool newValue) : val(fl), flag(&fl) { fl = newValue; }
  ~FlagSetting() { *flag = val; }
};
// RAII guard: increments a counter on construction and decrements it on
// destruction, so the counter tracks how many such scopes are active.
class CounterSetting {
  intx* counter;  // counter being tracked
 public:
  CounterSetting(intx* cnt) : counter(cnt) { ++(*counter); }
  ~CounterSetting() { --(*counter); }
};
// RAII guard: overrides a uintx flag for the lifetime of this object and
// restores the original value on destruction.
class UIntFlagSetting {
  uintx val;     // saved original value
  uintx* flag;   // flag being overridden
 public:
  UIntFlagSetting(uintx& fl, uintx newValue) : val(fl), flag(&fl) { fl = newValue; }
  ~UIntFlagSetting() { *flag = val; }
};
// RAII guard: overrides a double flag for the lifetime of this object and
// restores the original value on destruction.
class DoubleFlagSetting {
  double val;     // saved original value
  double* flag;   // flag being overridden
 public:
  DoubleFlagSetting(double& fl, double newValue) : val(fl), flag(&fl) { fl = newValue; }
  ~DoubleFlagSetting() { *flag = val; }
};
class CommandLineFlags {
public:
static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
static bool boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
static bool intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
// Convenience overload: forwards to the (name, len) variant using the
// NUL-terminated length of `name`.
static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) {
  const size_t name_len = strlen(name);
  return doubleAt(name, name_len, value, allow_locked, return_flag);
}
static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
// Convenience overload: forwards to the (name, len) variant using the
// NUL-terminated length of `name`.
static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) {
  const size_t name_len = strlen(name);
  return doubleAtPut(name, name_len, value, origin);
}
static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
// Convenience overload: forwards to the (name, len) variant using the
// NUL-terminated length of `name`.
static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) {
  const size_t name_len = strlen(name);
  return ccstrAt(name, name_len, value, allow_locked, return_flag);
}
static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
// Convenience overload: forwards to the (name, len) variant using the
// NUL-terminated length of `name`.
static bool ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) {
  const size_t name_len = strlen(name);
  return ccstrAtPut(name, name_len, value, origin);
}
static bool wasSetOnCmdline(const char* name, bool* value);
static void printSetFlags(outputStream* out);
static void printFlags(outputStream* out, bool withComments);
static void verify() PRODUCT_RETURN;
};
// Build-configuration constants used as default values for the flag
// declarations below, so a flag's default can differ per build flavor.
//
// trueInDebug / falseInDebug: track the ASSERT (debug) build setting —
// trueInDebug is true only in debug builds.
#ifdef ASSERT
#define trueInDebug true
#define falseInDebug false
#else
#define trueInDebug false
#define falseInDebug true
#endif
// trueInProduct / falseInProduct: track the PRODUCT build setting —
// trueInProduct is true only in product builds.
#ifdef PRODUCT
#define trueInProduct true
#define falseInProduct false
#else
#define trueInProduct false
#define falseInProduct true
#endif
// falseInEmbedded: false on JAVASE_EMBEDDED builds, true otherwise
// (i.e. "defaults to true except on embedded").
#ifdef JAVASE_EMBEDDED
#define falseInEmbedded false
#else
#define falseInEmbedded true
#endif
#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \
\
lp64_product(bool, UseCompressedOops, false, \
"Use 32-bit object references in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \
\
lp64_product(bool, UseCompressedClassPointers, false, \
"Use 32-bit class pointers in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \
\
notproduct(bool, CheckCompressedOops, true, \
"Generate checks in encoding/decoding code in debug VM") \
\
product_pd(uintx, HeapBaseMinAddress, \
"OS specific low limit for heap base address") \
\
diagnostic(bool, PrintCompressedOopsMode, false, \
"Print compressed oops base address and encoding mode") \
\
lp64_product(intx, ObjectAlignmentInBytes, 8, \
"Default object alignment in bytes, 8 is minimum") \
\
product(bool, AssumeMP, false, \
"Instruct the VM to assume multiple processors are available") \
\
product_pd(bool, UseMembar, \
"(Unstable) Issues membars on thread state transitions") \
\
develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
"Clean the chunk pool asynchronously") \
\
experimental(bool, UseMemSetInBOT, true, \
"(Unstable) uses memset in BOT updates in GC code") \
\
diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \
"Enable normal processing of flags relating to field diagnostics")\
\
experimental(bool, UnlockExperimentalVMOptions, false, \
"Enable normal processing of flags relating to experimental " \
"features") \
\
product(bool, JavaMonitorsInStackTrace, true, \
"Print information about Java monitor locks when the stacks are" \
"dumped") \
\
product_pd(bool, UseLargePages, \
"Use large page memory") \
\
product_pd(bool, UseLargePagesIndividualAllocation, \
"Allocate large pages individually for better affinity") \
\
develop(bool, LargePagesIndividualAllocationInjectError, false, \
"Fail large pages individual allocation") \
\
product(bool, UseLargePagesInMetaspace, false, \
"Use large page memory in metaspace. " \
"Only used if UseLargePages is enabled.") \
\
develop(bool, TracePageSizes, false, \
"Trace page size selection and usage") \
\
product(bool, UseNUMA, false, \
"Use NUMA if available") \
\
product(bool, UseNUMAInterleaving, false, \
"Interleave memory across NUMA nodes if available") \
\
product(uintx, NUMAInterleaveGranularity, 2*M, \
"Granularity to use for NUMA interleaving on Windows OS") \
\
product(bool, ForceNUMA, false, \
"Force NUMA optimizations on single-node/UMA systems") \
\
product(uintx, NUMAChunkResizeWeight, 20, \
"Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying average for " \
"AdaptiveNUMAChunkSizing") \
\
product(uintx, NUMASpaceResizeRate, 1*G, \
"Do not reallocate more than this amount per collection") \
\
product(bool, UseAdaptiveNUMAChunkSizing, true, \
"Enable adaptive chunk sizing for NUMA") \
\
product(bool, NUMAStats, false, \
"Print NUMA stats in detailed heap information") \
\
product(uintx, NUMAPageScanRate, 256, \
"Maximum number of pages to include in the page scan procedure") \
\
product_pd(bool, NeedsDeoptSuspend, \
"True for register window machines (sparc/ia64)") \
\
product(intx, UseSSE, 99, \
"Highest supported SSE instructions set on x86/x64") \
\
product(bool, UseAES, false, \
"Control whether AES instructions can be used on x86/x64") \
\
product(bool, UseSHA, false, \
"Control whether SHA instructions can be used on SPARC") \
\
product(bool, UseGHASHIntrinsics, false, \
"Use intrinsics for GHASH versions of crypto") \
\
product(uintx, LargePageSizeInBytes, 0, \
"Large page size (0 to let VM choose the page size)") \
\
product(uintx, LargePageHeapSizeThreshold, 128*M, \
"Use large pages if maximum heap is at least this big") \
\
product(bool, ForceTimeHighResolution, false, \
"Using high time resolution (for Win32 only)") \
\
develop(bool, TraceItables, false, \
"Trace initialization and use of itables") \
\
develop(bool, TracePcPatching, false, \
"Trace usage of frame::patch_pc") \
\
develop(bool, TraceJumps, false, \
"Trace assembly jumps in thread ring buffer") \
\
develop(bool, TraceRelocator, false, \
"Trace the bytecode relocator") \
\
develop(bool, TraceLongCompiles, false, \
"Print out every time compilation is longer than " \
"a given threshold") \
\
develop(bool, SafepointALot, false, \
"Generate a lot of safepoints. This works with " \
"GuaranteedSafepointInterval") \
\
product_pd(bool, BackgroundCompilation, \
"A thread requesting compilation is not blocked during " \
"compilation") \
\
product(bool, PrintVMQWaitTime, false, \
"Print out the waiting time in VM operation queue") \
\
develop(bool, NoYieldsInMicrolock, false, \
"Disable yields in microlock") \
\
develop(bool, TraceOopMapGeneration, false, \
"Show OopMapGeneration") \
\
product(bool, MethodFlushing, true, \
"Reclamation of zombie and not-entrant methods") \
\
develop(bool, VerifyStack, false, \
"Verify stack of each thread when it is entering a runtime call") \
\
diagnostic(bool, ForceUnreachable, false, \
"Make all non code cache addresses to be unreachable by " \
"forcing use of 64bit literal fixups") \
\
notproduct(bool, StressDerivedPointers, false, \
"Force scavenge when a derived pointer is detected on stack " \
"after rtm call") \
\
develop(bool, TraceDerivedPointers, false, \
"Trace traversal of derived pointers on stack") \
\
notproduct(bool, TraceCodeBlobStacks, false, \
"Trace stack-walk of codeblobs") \
\
product(bool, PrintJNIResolving, false, \
"Used to implement -v:jni") \
\
notproduct(bool, PrintRewrites, false, \
"Print methods that are being rewritten") \
\
product(bool, UseInlineCaches, true, \
"Use Inline Caches for virtual calls ") \
\
develop(bool, InlineArrayCopy, true, \
"Inline arraycopy native that is known to be part of " \
"base library DLL") \
\
develop(bool, InlineObjectHash, true, \
"Inline Object::hashCode() native that is known to be part " \
"of base library DLL") \
\
develop(bool, InlineNatives, true, \
"Inline natives that are known to be part of base library DLL") \
\
develop(bool, InlineMathNatives, true, \
"Inline SinD, CosD, etc.") \
\
develop(bool, InlineClassNatives, true, \
"Inline Class.isInstance, etc") \
\
develop(bool, InlineThreadNatives, true, \
"Inline Thread.currentThread, etc") \
\
develop(bool, InlineUnsafeOps, true, \
"Inline memory ops (native methods) from sun.misc.Unsafe") \
\
product(bool, CriticalJNINatives, true, \
"Check for critical JNI entry points") \
\
product(bool, UseLegacyJNINameEscaping, false, \
"Use the original JNI name escaping scheme") \
\
notproduct(bool, StressCriticalJNINatives, false, \
"Exercise register saving code in critical natives") \
\
product(bool, UseSSE42Intrinsics, false, \
"SSE4.2 versions of intrinsics") \
\
product(bool, UseAESIntrinsics, false, \
"Use intrinsics for AES versions of crypto") \
\
product(bool, UseSHA1Intrinsics, false, \
"Use intrinsics for SHA-1 crypto hash function") \
\
product(bool, UseSHA256Intrinsics, false, \
"Use intrinsics for SHA-224 and SHA-256 crypto hash functions") \
\
product(bool, UseSHA512Intrinsics, false, \
"Use intrinsics for SHA-384 and SHA-512 crypto hash functions") \
\
product(bool, UseCRC32Intrinsics, false, \
"use intrinsics for java.util.zip.CRC32") \
\
develop(bool, TraceCallFixup, false, \
"Trace all call fixups") \
\
develop(bool, DeoptimizeALot, false, \
"Deoptimize at every exit from the runtime system") \
\
notproduct(ccstrlist, DeoptimizeOnlyAt, "", \
"A comma separated list of bcis to deoptimize at") \
\
product(bool, DeoptimizeRandom, false, \
"Deoptimize random frames on random exit from the runtime system")\
\
notproduct(bool, ZombieALot, false, \
"Create zombies (non-entrant) at exit from the runtime system") \
\
product(bool, UnlinkSymbolsALot, false, \
"Unlink unreferenced symbols from the symbol table at safepoints")\
\
notproduct(bool, WalkStackALot, false, \
"Trace stack (no print) at every exit from the runtime system") \
\
product(bool, Debugging, false, \
"Set when executing debug methods in debug.cpp " \
"(to prevent triggering assertions)") \
\
notproduct(bool, StrictSafepointChecks, trueInDebug, \
"Enable strict checks that safepoints cannot happen for threads " \
"that use No_Safepoint_Verifier") \
\
notproduct(bool, VerifyLastFrame, false, \
"Verify oops on last frame on entry to VM") \
\
develop(bool, TraceHandleAllocation, false, \
"Print out warnings when suspiciously many handles are allocated")\
\
product(bool, UseCompilerSafepoints, true, \
"Stop at safepoints in compiled code") \
\
product(bool, FailOverToOldVerifier, true, \
"Fail over to old verifier when split verifier fails") \
\
develop(bool, ShowSafepointMsgs, false, \
"Show message about safepoint synchronization") \
\
product(bool, SafepointTimeout, false, \
"Time out and warn or fail after SafepointTimeoutDelay " \
"milliseconds if failed to reach safepoint") \
\
diagnostic(bool, AbortVMOnSafepointTimeout, false, \
"Abort upon failure to reach safepoint (see SafepointTimeout)") \
\
product(intx, SuspendRetryCount, 50, \
"Maximum retry count for an external suspend request") \
\
product(intx, SuspendRetryDelay, 5, \
"Milliseconds to delay per retry (* current_retry_count)") \
\
product(bool, AssertOnSuspendWaitFailure, false, \
"Assert/Guarantee on external suspend wait failure") \
\
product(bool, TraceSuspendWaitFailures, false, \
"Trace external suspend wait failures") \
\
product(bool, MaxFDLimit, true, \
"Bump the number of file descriptors to maximum in Solaris") \
\
diagnostic(bool, LogEvents, true, \
"Enable the various ring buffer event logs") \
\
diagnostic(uintx, LogEventsBufferEntries, 10, \
"Number of ring buffer event logs") \
\
product(bool, BytecodeVerificationRemote, true, \
"Enable the Java bytecode verifier for remote classes") \
\
product(bool, BytecodeVerificationLocal, false, \
"Enable the Java bytecode verifier for local classes") \
\
develop(bool, ForceFloatExceptions, trueInDebug, \
"Force exceptions on FP stack under/overflow") \
\
develop(bool, VerifyStackAtCalls, false, \
"Verify that the stack pointer is unchanged after calls") \
\
develop(bool, TraceJavaAssertions, false, \
"Trace java language assertions") \
\
notproduct(bool, CheckAssertionStatusDirectives, false, \
"Temporary - see javaClasses.cpp") \
\
notproduct(bool, PrintMallocFree, false, \
"Trace calls to C heap malloc/free allocation") \
\
product(bool, PrintOopAddress, false, \
"Always print the location of the oop") \
\
notproduct(bool, VerifyCodeCacheOften, false, \
"Verify compiled-code cache often") \
\
develop(bool, ZapDeadCompiledLocals, false, \
"Zap dead locals in compiler frames") \
\
notproduct(bool, ZapDeadLocalsOld, false, \
"Zap dead locals (old version, zaps all frames when " \
"entering the VM") \
\
notproduct(bool, CheckOopishValues, false, \
"Warn if value contains oop (requires ZapDeadLocals)") \
\
develop(bool, UseMallocOnly, false, \
"Use only malloc/free for allocation (no resource area/arena)") \
\
develop(bool, PrintMalloc, false, \
"Print all malloc/free calls") \
\
develop(bool, PrintMallocStatistics, false, \
"Print malloc/free statistics") \
\
develop(bool, ZapResourceArea, trueInDebug, \
"Zap freed resource/arena space with 0xABABABAB") \
\
notproduct(bool, ZapVMHandleArea, trueInDebug, \
"Zap freed VM handle space with 0xBCBCBCBC") \
\
develop(bool, ZapJNIHandleArea, trueInDebug, \
"Zap freed JNI handle space with 0xFEFEFEFE") \
\
notproduct(bool, ZapStackSegments, trueInDebug, \
"Zap allocated/freed stack segments with 0xFADFADED") \
\
develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \
\
develop(bool, TraceZapUnusedHeapArea, false, \
"Trace zapping of unused heap space") \
\
develop(bool, CheckZapUnusedHeapArea, false, \
"Check zapping of unused heap space") \
\
develop(bool, ZapFillerObjects, trueInDebug, \
"Zap filler objects with 0xDEAFBABE") \
\
develop(bool, PrintVMMessages, true, \
"Print VM messages on console") \
\
product(bool, PrintGCApplicationConcurrentTime, false, \
"Print the time the application has been running") \
\
product(bool, PrintGCApplicationStoppedTime, false, \
"Print the time the application has been stopped") \
\
diagnostic(bool, VerboseVerification, false, \
"Display detailed verification details") \
\
notproduct(uintx, ErrorHandlerTest, 0, \
"If > 0, provokes an error after VM initialization; the value " \
"determines which error to provoke. See test_error_handler() " \
"in debug.cpp.") \
\
develop(bool, Verbose, false, \
"Print additional debugging information from other modes") \
\
develop(bool, PrintMiscellaneous, false, \
"Print uncategorized debugging information (requires +Verbose)") \
\
develop(bool, WizardMode, false, \
"Print much more debugging information") \
\
product(bool, ShowMessageBoxOnError, false, \
"Keep process alive on VM fatal error") \
\
product(bool, CreateMinidumpOnCrash, false, \
"Create minidump on VM fatal error") \
\
product_pd(bool, UseOSErrorReporting, \
"Let VM fatal error propagate to the OS (ie. WER on Windows)") \
\
product(bool, SuppressFatalErrorMessage, false, \
"Report NO fatal error message (avoid deadlock)") \
\
product(ccstrlist, OnError, "", \
"Run user-defined commands on fatal error; see VMError.cpp " \
"for examples") \
\
product(ccstrlist, OnOutOfMemoryError, "", \
"Run user-defined commands on first java.lang.OutOfMemoryError") \
\
manageable(bool, HeapDumpBeforeFullGC, false, \
"Dump heap to file before any major stop-the-world GC") \
\
manageable(bool, HeapDumpAfterFullGC, false, \
"Dump heap to file after any major stop-the-world GC") \
\
manageable(bool, HeapDumpOnOutOfMemoryError, false, \
"Dump heap to file when java.lang.OutOfMemoryError is thrown") \
\
manageable(ccstr, HeapDumpPath, NULL, \
"When HeapDumpOnOutOfMemoryError is on, the path (filename or " \
"directory) of the dump file (defaults to java_pid<pid>.hprof " \
"in the working directory)") \
\
develop(uintx, SegmentedHeapDumpThreshold, 2*G, \
"Generate a segmented heap dump (JAVA PROFILE 1.0.2 format) " \
"when the heap usage is larger than this") \
\
develop(uintx, HeapDumpSegmentSize, 1*G, \
"Approximate segment size when generating a segmented heap dump") \
\
develop(bool, BreakAtWarning, false, \
"Execute breakpoint upon encountering VM warning") \
\
develop(bool, TraceVMOperation, false, \
"Trace VM operations") \
\
develop(bool, UseFakeTimers, false, \
"Tell whether the VM should use system time or a fake timer") \
\
product(ccstr, NativeMemoryTracking, "off", \
"Native memory tracking options") \
\
diagnostic(bool, PrintNMTStatistics, false, \
"Print native memory tracking summary data if it is on") \
\
diagnostic(bool, LogCompilation, false, \
"Log compilation activity in detail to LogFile") \
\
product(bool, PrintCompilation, false, \
"Print compilations") \
\
diagnostic(bool, TraceNMethodInstalls, false, \
"Trace nmethod installation") \
\
diagnostic(intx, ScavengeRootsInCode, 2, \
"0: do not allow scavengable oops in the code cache; " \
"1: allow scavenging from the code cache; " \
"2: emit as many constants as the compiler can see") \
\
product(bool, AlwaysRestoreFPU, false, \
"Restore the FPU control word after every JNI call (expensive)") \
\
diagnostic(bool, PrintCompilation2, false, \
"Print additional statistics per compilation") \
\
diagnostic(bool, PrintAdapterHandlers, false, \
"Print code generated for i2c/c2i adapters") \
\
diagnostic(bool, VerifyAdapterCalls, trueInDebug, \
"Verify that i2c/c2i adapters are called properly") \
\
develop(bool, VerifyAdapterSharing, false, \
"Verify that the code for shared adapters is the equivalent") \
\
diagnostic(bool, PrintAssembly, false, \
"Print assembly code (using external disassembler.so)") \
\
diagnostic(ccstr, PrintAssemblyOptions, NULL, \
"Print options string passed to disassembler.so") \
\
diagnostic(bool, PrintNMethods, false, \
"Print assembly code for nmethods when generated") \
\
diagnostic(bool, PrintNativeNMethods, false, \
"Print assembly code for native nmethods when generated") \
\
develop(bool, PrintDebugInfo, false, \
"Print debug information for all nmethods when generated") \
\
develop(bool, PrintRelocations, false, \
"Print relocation information for all nmethods when generated") \
\
develop(bool, PrintDependencies, false, \
"Print dependency information for all nmethods when generated") \
\
develop(bool, PrintExceptionHandlers, false, \
"Print exception handler tables for all nmethods when generated") \
\
develop(bool, StressCompiledExceptionHandlers, false, \
"Exercise compiled exception handlers") \
\
develop(bool, InterceptOSException, false, \
"Start debugger when an implicit OS (e.g. NULL) " \
"exception happens") \
\
product(bool, PrintCodeCache, false, \
"Print the code cache memory usage when exiting") \
\
develop(bool, PrintCodeCache2, false, \
"Print detailed usage information on the code cache when exiting")\
\
product(bool, PrintCodeCacheOnCompilation, false, \
"Print the code cache memory usage each time a method is " \
"compiled") \
\
diagnostic(bool, PrintStubCode, false, \
"Print generated stub code") \
\
product(bool, StackTraceInThrowable, true, \
"Collect backtrace in throwable when exception happens") \
\
product(bool, OmitStackTraceInFastThrow, true, \
"Omit backtraces for some 'hot' exceptions in optimized code") \
\
product(bool, ProfilerPrintByteCodeStatistics, false, \
"Print bytecode statistics when dumping profiler output") \
\
product(bool, ProfilerRecordPC, false, \
"Collect ticks for each 16 byte interval of compiled code") \
\
product(bool, ProfileVM, false, \
"Profile ticks that fall within VM (either in the VM Thread " \
"or VM code called through stubs)") \
\
product(bool, ProfileIntervals, false, \
"Print profiles for each interval (see ProfileIntervalsTicks)") \
\
notproduct(bool, ProfilerCheckIntervals, false, \
"Collect and print information on spacing of profiler ticks") \
\
develop(bool, PrintJVMWarnings, false, \
"Print warnings for unimplemented JVM functions") \
\
product(bool, PrintWarnings, true, \
"Print JVM warnings to output stream") \
\
notproduct(uintx, WarnOnStalledSpinLock, 0, \
"Print warnings for stalled SpinLocks") \
\
product(bool, RegisterFinalizersAtInit, true, \
"Register finalizable objects at end of Object.<init> or " \
"after allocation") \
\
develop(bool, RegisterReferences, true, \
"Tell whether the VM should register soft/weak/final/phantom " \
"references") \
\
develop(bool, IgnoreRewrites, false, \
"Suppress rewrites of bytecodes in the oopmap generator. " \
"This is unsafe!") \
\
develop(bool, PrintCodeCacheExtension, false, \
"Print extension of code cache") \
\
develop(bool, UsePrivilegedStack, true, \
"Enable the security JVM functions") \
\
develop(bool, ProtectionDomainVerification, true, \
"Verify protection domain before resolution in system dictionary")\
\
product(bool, ClassUnloading, true, \
"Do unloading of classes") \
\
product(bool, ClassUnloadingWithConcurrentMark, true, \
"Do unloading of classes with a concurrent marking cycle") \
\
develop(bool, DisableStartThread, false, \
"Disable starting of additional Java threads " \
"(for debugging only)") \
\
develop(bool, MemProfiling, false, \
"Write memory usage profiling to log file") \
\
notproduct(bool, PrintSystemDictionaryAtExit, false, \
"Print the system dictionary at exit") \
\
experimental(intx, PredictedLoadedClassCount, 0, \
"Experimental: Tune loaded class cache starting size") \
\
diagnostic(bool, UnsyncloadClass, false, \
"Unstable: VM calls loadClass unsynchronized. Custom " \
"class loader must call VM synchronized for findClass " \
"and defineClass.") \
\
product(bool, AlwaysLockClassLoader, false, \
"Require the VM to acquire the class loader lock before calling " \
"loadClass() even for class loaders registering " \
"as parallel capable") \
\
product(bool, AllowParallelDefineClass, false, \
"Allow parallel defineClass requests for class loaders " \
"registering as parallel capable") \
\
product(bool, MustCallLoadClassInternal, false, \
"Call loadClassInternal() rather than loadClass()") \
\
product_pd(bool, DontYieldALot, \
"Throw away obvious excess yield calls (for Solaris only)") \
\
product_pd(bool, ConvertSleepToYield, \
"Convert sleep(0) to thread yield " \
"(may be off for Solaris to improve GUI)") \
\
product(bool, ConvertYieldToSleep, false, \
"Convert yield to a sleep of MinSleepInterval to simulate Win32 " \
"behavior (Solaris only)") \
\
product(bool, UseBoundThreads, true, \
"Bind user level threads to kernel threads (for Solaris only)") \
\
develop(bool, UseDetachedThreads, true, \
"Use detached threads that are recycled upon termination " \
"(for Solaris only)") \
\
experimental(bool, DisablePrimordialThreadGuardPages, false, \
"Disable the use of stack guard pages if the JVM is loaded " \
"on the primordial process thread") \
\
product(bool, UseLWPSynchronization, true, \
"Use LWP-based instead of libthread-based synchronization " \
"(SPARC only)") \
\
product(ccstr, SyncKnobs, NULL, \
"(Unstable) Various monitor synchronization tunables") \
\
product(intx, EmitSync, 0, \
"(Unsafe, Unstable) " \
"Control emission of inline sync fast-path code") \
\
product(intx, MonitorBound, 0, "Bound Monitor population") \
\
product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \
\
product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
\
product(intx, SyncVerbose, 0, "(Unstable)") \
\
product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \
\
product(intx, hashCode, 5, \
"(Unstable) select hashCode generation algorithm") \
\
product(intx, WorkAroundNPTLTimedWaitHang, 1, \
"(Unstable, Linux-specific) " \
"avoid NPTL-FUTEX hang pthread_cond_timedwait") \
\
product(bool, FilterSpuriousWakeups, true, \
"Prevent spurious or premature wakeups from object.wait " \
"(Solaris only)") \
\
experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \
\
experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \
\
experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
\
develop(bool, UsePthreads, false, \
"Use pthread-based instead of libthread-based synchronization " \
"(SPARC only)") \
\
product(bool, AdjustConcurrency, false, \
"Call thr_setconcurrency at thread creation time to avoid " \
"LWP starvation on MP systems (for Solaris Only)") \
\
product(bool, ReduceSignalUsage, false, \
"Reduce the use of OS signals in Java and/or the VM") \
\
develop(bool, LoadLineNumberTables, true, \
"Tell whether the class file parser loads line number tables") \
\
develop(bool, LoadLocalVariableTables, true, \
"Tell whether the class file parser loads local variable tables") \
\
develop(bool, LoadLocalVariableTypeTables, true, \
"Tell whether the class file parser loads local variable type" \
"tables") \
\
product(bool, AllowUserSignalHandlers, false, \
"Do not complain if the application installs signal handlers " \
"(Solaris & Linux only)") \
\
product(bool, UseSignalChaining, true, \
"Use signal-chaining to invoke signal handlers installed " \
"by the application (Solaris & Linux only)") \
\
product(bool, UseAltSigs, false, \
"Use alternate signals instead of SIGUSR1 & SIGUSR2 for VM " \
"internal signals (Solaris only)") \
\
product(bool, AllowJNIEnvProxy, false, \
"Allow JNIEnv proxies for jdbx") \
\
product(bool, JNIDetachReleasesMonitors, true, \
"JNI DetachCurrentThread releases monitors owned by thread") \
\
product(bool, RestoreMXCSROnJNICalls, false, \
"Restore MXCSR when returning from JNI calls") \
\
product(bool, CheckJNICalls, false, \
"Verify all arguments to JNI calls") \
\
product(bool, CheckEndorsedAndExtDirs, false, \
"Verify the endorsed and extension directories are not used") \
\
product(bool, UseFastJNIAccessors, true, \
"Use optimized versions of Get<Primitive>Field") \
\
product(intx, MaxJNILocalCapacity, 65536, \
"Maximum allowable local JNI handle capacity to " \
"EnsureLocalCapacity() and PushLocalFrame(), " \
"where <= 0 is unlimited, default: 65536") \
\
product(bool, EagerXrunInit, false, \
"Eagerly initialize -Xrun libraries; allows startup profiling, " \
"but not all -Xrun libraries may support the state of the VM " \
"at this time") \
\
product(bool, PreserveAllAnnotations, false, \
"Preserve RuntimeInvisibleAnnotations as well " \
"as RuntimeVisibleAnnotations") \
\
develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
"Number of OutOfMemoryErrors preallocated with backtrace") \
\
product(bool, LazyBootClassLoader, true, \
"Enable/disable lazy opening of boot class path entries") \
\
product(bool, UseXMMForArrayCopy, false, \
"Use SSE2 MOVQ instruction for Arraycopy") \
\
product(intx, FieldsAllocationStyle, 1, \
"0 - type based with oops first, 1 - with oops last, " \
"2 - oops in super and sub classes are together") \
\
product(bool, CompactFields, true, \
"Allocate nonstatic fields in gaps between previous fields") \
\
notproduct(bool, PrintFieldLayout, false, \
"Print field layout for each class") \
\
product(intx, ContendedPaddingWidth, 128, \
"How many bytes to pad the fields/classes marked @Contended with")\
\
product(bool, EnableContended, true, \
"Enable @Contended annotation support") \
\
product(bool, RestrictContended, true, \
"Restrict @Contended to trusted classes") \
\
product(bool, UseBiasedLocking, true, \
"Enable biased locking in JVM") \
\
product(intx, BiasedLockingStartupDelay, 4000, \
"Number of milliseconds to wait before enabling biased locking") \
\
diagnostic(bool, PrintBiasedLockingStatistics, false, \
"Print statistics of biased locking in JVM") \
\
product(intx, BiasedLockingBulkRebiasThreshold, 20, \
"Threshold of number of revocations per type to try to " \
"rebias all objects in the heap of that type") \
\
product(intx, BiasedLockingBulkRevokeThreshold, 40, \
"Threshold of number of revocations per type to permanently " \
"revoke biases of all objects in the heap of that type") \
\
product(intx, BiasedLockingDecayTime, 25000, \
"Decay time (in milliseconds) to re-enable bulk rebiasing of a " \
"type after previous bulk rebias") \
\
product(bool, ExitOnOutOfMemoryError, false, \
"JVM exits on the first occurrence of an out-of-memory error") \
\
product(bool, CrashOnOutOfMemoryError, false, \
"JVM aborts, producing an error log and core/mini dump, on the " \
"first occurrence of an out-of-memory error") \
\
\
notproduct(bool, TraceRuntimeCalls, false, \
"Trace run-time calls") \
\
develop(bool, TraceJNICalls, false, \
"Trace JNI calls") \
\
develop(bool, StressRewriter, false, \
"Stress linktime bytecode rewriting") \
\
notproduct(bool, TraceJVMCalls, false, \
"Trace JVM calls") \
\
product(ccstr, TraceJVMTI, NULL, \
"Trace flags for JVMTI functions and events") \
\
product(bool, StressLdcRewrite, false, \
"Force ldc -> ldc_w rewrite during RedefineClasses") \
\
product(intx, TraceRedefineClasses, 0, \
"Trace level for JVMTI RedefineClasses") \
\
develop(bool, StressMethodComparator, false, \
"Run the MethodComparator on all loaded methods") \
\
product(bool, VerifyMergedCPBytecodes, true, \
"Verify bytecodes after RedefineClasses constant pool merging") \
\
develop(bool, TraceJNIHandleAllocation, false, \
"Trace allocation/deallocation of JNI handle blocks") \
\
develop(bool, TraceThreadEvents, false, \
"Trace all thread events") \
\
develop(bool, TraceBytecodes, false, \
"Trace bytecode execution") \
\
develop(bool, TraceClassInitialization, false, \
"Trace class initialization") \
\
product(bool, TraceExceptions, false, \
"Trace exceptions") \
\
develop(bool, TraceICs, false, \
"Trace inline cache changes") \
\
notproduct(bool, TraceInvocationCounterOverflow, false, \
"Trace method invocation counter overflow") \
\
develop(bool, TraceInlineCacheClearing, false, \
"Trace clearing of inline caches in nmethods") \
\
develop(bool, TraceDependencies, false, \
"Trace dependencies") \
\
develop(bool, VerifyDependencies, trueInDebug, \
"Exercise and verify the compilation dependency mechanism") \
\
develop(bool, TraceNewOopMapGeneration, false, \
"Trace OopMapGeneration") \
\
develop(bool, TraceNewOopMapGenerationDetailed, false, \
"Trace OopMapGeneration: print detailed cell states") \
\
develop(bool, TimeOopMap, false, \
"Time calls to GenerateOopMap::compute_map() in sum") \
\
develop(bool, TimeOopMap2, false, \
"Time calls to GenerateOopMap::compute_map() individually") \
\
develop(bool, TraceMonitorMismatch, false, \
"Trace monitor matching failures during OopMapGeneration") \
\
develop(bool, TraceOopMapRewrites, false, \
"Trace rewriting of method oops during oop map generation") \
\
develop(bool, TraceSafepoint, false, \
"Trace safepoint operations") \
\
develop(bool, TraceICBuffer, false, \
"Trace usage of IC buffer") \
\
develop(bool, TraceCompiledIC, false, \
"Trace changes of compiled IC") \
\
notproduct(bool, TraceZapDeadLocals, false, \
"Trace zapping dead locals") \
\
develop(bool, TraceStartupTime, false, \
"Trace setup time") \
\
develop(bool, TraceProtectionDomainVerification, false, \
"Trace protection domain verification") \
\
develop(bool, TraceClearedExceptions, false, \
"Print when an exception is forcibly cleared") \
\
product(bool, TraceClassResolution, false, \
"Trace all constant pool resolutions (for debugging)") \
\
product(bool, TraceBiasedLocking, false, \
"Trace biased locking in JVM") \
\
product(bool, TraceMonitorInflation, false, \
"Trace monitor inflation in JVM") \
\
\
product(bool, UseSerialGC, false, \
"Use the Serial garbage collector") \
\
product(bool, UseG1GC, false, \
"Use the Garbage-First garbage collector") \
\
product(bool, UseParallelGC, false, \
"Use the Parallel Scavenge garbage collector") \
\
product(bool, UseParallelOldGC, false, \
"Use the Parallel Old garbage collector") \
\
product(uintx, HeapMaximumCompactionInterval, 20, \
"How often should we maximally compact the heap (not allowing " \
"any dead space)") \
\
product(uintx, HeapFirstMaximumCompactionCount, 3, \
"The collection count for the first maximum compaction") \
\
product(bool, UseMaximumCompactionOnSystemGC, true, \
"Use maximum compaction in the Parallel Old garbage collector " \
"for a system GC") \
\
product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
"The mean used by the parallel compact dead wood " \
"limiter (a number between 0-100)") \
\
product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \
"The standard deviation used by the parallel compact dead wood " \
"limiter (a number between 0-100)") \
\
product(uintx, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \
\
product(bool, UseDynamicNumberOfGCThreads, false, \
"Dynamically choose the number of parallel threads " \
"parallel gc will use") \
\
diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \
"Force dynamic selection of the number of " \
"parallel threads parallel gc will use to aid debugging") \
\
product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \
"Size of heap (bytes) per GC thread used in calculating the " \
"number of GC threads") \
\
product(bool, TraceDynamicGCThreads, false, \
"Trace the dynamic GC thread usage") \
\
develop(bool, ParallelOldGCSplitALot, false, \
"Provoke splitting (copying data from a young gen space to " \
"multiple destination spaces)") \
\
develop(uintx, ParallelOldGCSplitInterval, 3, \
"How often to provoke splitting a young gen space") \
\
product(uintx, ConcGCThreads, 0, \
"Number of threads concurrent gc will use") \
\
product(uintx, YoungPLABSize, 4096, \
"Size of young gen promotion LAB's (in HeapWords)") \
\
product(uintx, OldPLABSize, 1024, \
"Size of old gen promotion LAB's (in HeapWords)") \
\
product(uintx, GCTaskTimeStampEntries, 200, \
"Number of time stamp entries per gc worker thread") \
\
product(bool, AlwaysTenure, false, \
"Always tenure objects in eden (ParallelGC only)") \
\
product(bool, NeverTenure, false, \
"Never tenure objects in eden, may tenure on overflow " \
"(ParallelGC only)") \
\
product(bool, ScavengeBeforeFullGC, true, \
"Scavenge youngest generation before each full GC, " \
"used with UseParallelGC") \
\
develop(bool, ScavengeWithObjectsInToSpace, false, \
"Allow scavenges to occur when to-space contains objects") \
\
product(bool, UseConcMarkSweepGC, false, \
"Use Concurrent Mark-Sweep GC in the old generation") \
\
product(bool, ExplicitGCInvokesConcurrent, false, \
"A System.gc() request invokes a concurrent collection; " \
"(effective only when UseConcMarkSweepGC)") \
\
product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
"A System.gc() request invokes a concurrent collection and " \
"also unloads classes during such a concurrent gc cycle " \
"(effective only when UseConcMarkSweepGC)") \
\
product(bool, GCLockerInvokesConcurrent, false, \
"The exit of a JNI critical section necessitating a scavenge, " \
"also kicks off a background concurrent collection") \
\
product(uintx, GCLockerEdenExpansionPercent, 5, \
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
\
diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
"Number of times to retry allocations when " \
"blocked by the GC locker") \
\
develop(bool, UseCMSAdaptiveFreeLists, true, \
"Use adaptive free lists in the CMS generation") \
\
develop(bool, UseAsyncConcMarkSweepGC, true, \
"Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
\
develop(bool, RotateCMSCollectionTypes, false, \
"Rotate the CMS collections among concurrent and STW") \
\
product(bool, UseCMSBestFit, true, \
"Use CMS best fit allocation strategy") \
\
product(bool, UseCMSCollectionPassing, true, \
"Use passing of collection from background to foreground") \
\
product(bool, UseParNewGC, false, \
"Use parallel threads in the new generation") \
\
product(bool, ParallelGCVerbose, false, \
"Verbose output for parallel gc") \
\
product(uintx, ParallelGCBufferWastePct, 10, \
"Wasted fraction of parallel allocation buffer") \
\
diagnostic(bool, ParallelGCRetainPLAB, false, \
"Retain parallel allocation buffers across scavenges; " \
"it is disabled because this currently conflicts with " \
"parallel card scanning under certain conditions.") \
\
product(uintx, TargetPLABWastePct, 10, \
"Target wasted space in last buffer as percent of overall " \
"allocation") \
\
product(uintx, PLABWeight, 75, \
"Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying average for ResizePLAB") \
\
product(bool, ResizePLAB, true, \
"Dynamically resize (survivor space) promotion LAB's") \
\
product(bool, PrintPLAB, false, \
"Print (survivor space) promotion LAB's sizing decisions") \
\
product(intx, ParGCArrayScanChunk, 50, \
"Scan a subset of object array and push remainder, if array is " \
"bigger than this") \
\
product(bool, ParGCUseLocalOverflow, false, \
"Instead of a global overflow list, use local overflow stacks") \
\
product(bool, ParGCTrimOverflow, true, \
"Eagerly trim the local overflow lists " \
"(when ParGCUseLocalOverflow)") \
\
notproduct(bool, ParGCWorkQueueOverflowALot, false, \
"Simulate work queue overflow in ParNew") \
\
notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \
"An `interval' counter that determines how frequently " \
"we simulate overflow; a smaller number increases frequency") \
\
product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
"The desired number of objects to claim from the overflow list") \
\
diagnostic(uintx, ParGCStridesPerThread, 2, \
"The number of strides per worker thread that we divide up the " \
"card table scanning work into") \
\
diagnostic(intx, ParGCCardsPerStrideChunk, 256, \
"The number of cards in each chunk of the parallel chunks used " \
"during card table scanning") \
\
product(uintx, CMSParPromoteBlocksToClaim, 16, \
"Number of blocks to attempt to claim when refilling CMS LAB's " \
"for parallel GC") \
\
product(uintx, OldPLABWeight, 50, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for resizing " \
"CMSParPromoteBlocksToClaim") \
\
product(bool, ResizeOldPLAB, true, \
"Dynamically resize (old gen) promotion LAB's") \
\
product(bool, PrintOldPLAB, false, \
"Print (old gen) promotion LAB's sizing decisions") \
\
product(uintx, CMSOldPLABMin, 16, \
"Minimum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\
product(uintx, CMSOldPLABMax, 1024, \
"Maximum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\
product(uintx, CMSOldPLABNumRefills, 4, \
"Nominal number of refills of CMS gen promotion LAB cache " \
"per worker per block size") \
\
product(bool, CMSOldPLABResizeQuicker, false, \
"React on-the-fly during a scavenge to a sudden " \
"change in block demand rate") \
\
product(uintx, CMSOldPLABToleranceFactor, 4, \
"The tolerance of the phase-change detector for on-the-fly " \
"PLAB resizing during a scavenge") \
\
product(uintx, CMSOldPLABReactivityFactor, 2, \
"The gain in the feedback loop for on-the-fly PLAB resizing " \
"during a scavenge") \
\
product(bool, AlwaysPreTouch, false, \
"Force all freshly committed pages to be pre-touched") \
\
product_pd(uintx, CMSYoungGenPerWorker, \
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
product(bool, CMSIncrementalMode, false, \
"Whether CMS GC should operate in \"incremental\" mode") \
\
product(uintx, CMSIncrementalDutyCycle, 10, \
"Percentage (0-100) of CMS incremental mode duty cycle. If " \
"CMSIncrementalPacing is enabled, then this is just the initial " \
"value.") \
\
product(bool, CMSIncrementalPacing, true, \
"Whether the CMS incremental mode duty cycle should be " \
"automatically adjusted") \
\
product(uintx, CMSIncrementalDutyCycleMin, 0, \
"Minimum percentage (0-100) of the CMS incremental duty cycle " \
"used when CMSIncrementalPacing is enabled") \
\
product(uintx, CMSIncrementalSafetyFactor, 10, \
"Percentage (0-100) used to add conservatism when computing the " \
"duty cycle") \
\
product(uintx, CMSIncrementalOffset, 0, \
"Percentage (0-100) by which the CMS incremental mode duty cycle "\
"is shifted to the right within the period between young GCs") \
\
product(uintx, CMSExpAvgFactor, 50, \
"Percentage (0-100) used to weigh the current sample when " \
"computing exponential averages for CMS statistics") \
\
product(uintx, CMS_FLSWeight, 75, \
"Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying averages for CMS FLS " \
"statistics") \
\
product(uintx, CMS_FLSPadding, 1, \
"The multiple of deviation from mean to use for buffering " \
"against volatility in free list demand") \
\
product(uintx, FLSCoalescePolicy, 2, \
"CMS: aggressiveness level for coalescing, increasing " \
"from 0 to 4") \
\
product(bool, FLSAlwaysCoalesceLarge, false, \
"CMS: larger free blocks are always available for coalescing") \
\
product(double, FLSLargestBlockCoalesceProximity, 0.99, \
"CMS: the smaller the percentage the greater the coalescing " \
"force") \
\
product(double, CMSSmallCoalSurplusPercent, 1.05, \
"CMS: the factor by which to inflate estimated demand of small " \
"block sizes to prevent coalescing with an adjoining block") \
\
product(double, CMSLargeCoalSurplusPercent, 0.95, \
"CMS: the factor by which to inflate estimated demand of large " \
"block sizes to prevent coalescing with an adjoining block") \
\
product(double, CMSSmallSplitSurplusPercent, 1.10, \
"CMS: the factor by which to inflate estimated demand of small " \
"block sizes to prevent splitting to supply demand for smaller " \
"blocks") \
\
product(double, CMSLargeSplitSurplusPercent, 1.00, \
"CMS: the factor by which to inflate estimated demand of large " \
"block sizes to prevent splitting to supply demand for smaller " \
"blocks") \
\
product(bool, CMSExtrapolateSweep, false, \
"CMS: cushion for block demand during sweep") \
\
product(uintx, CMS_SweepWeight, 75, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for inter-sweep " \
"duration") \