void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
vmIntrinsics::ID iid,
Register receiver_reg,
Register member_reg,
bool for_compiler_entry) {
assert(is_signature_polymorphic(iid), "expected invoke iid");
Register rbx_method = rbx; // eventual target of this invocation
#ifdef _LP64
Register temp1 = rscratch1;
Register temp2 = rscratch2;
Register temp3 = rax;
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
}
#else
Register temp1 = (for_compiler_entry ? rsi : rdx);
Register temp2 = rdi;
Register temp3 = rax;
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
assert_different_registers(temp1, rcx, rdx);
assert_different_registers(temp2, rcx, rdx);
assert_different_registers(temp3, rcx, rdx);
}
#endif
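// Note: the else below pairs with the if (for_compiler_entry) emitted by
// whichever preprocessor branch was selected above; for interpreted entries
// the temps must also avoid saved_last_sp_register(), which holds the
// interpreter's saved last SP.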
else {
assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP
}
assert_different_registers(temp1, temp2, temp3, receiver_reg);
assert_different_registers(temp1, temp2, temp3, member_reg);
if (iid == vmIntrinsics::_invokeBasic) {
jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);
} else {
if (VerifyMethodHandles) {
verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
"MemberName required for invokeVirtual etc.");
}
Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
__ verify_oop(receiver_reg);
if (iid == vmIntrinsics::_linkToSpecial) {
__ null_check(receiver_reg);
} else {
__ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
BLOCK_COMMENT("check_receiver {");
if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
Label L_ok;
Register temp2_defc = temp2;
__ load_heap_oop(temp2_defc, member_clazz);
load_klass_from_Class(_masm, temp2_defc);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
__ STOP("receiver class disagrees with MemberName.clazz");
__ bind(L_ok);
}
BLOCK_COMMENT("} check_receiver");
}
if (iid == vmIntrinsics::_linkToSpecial ||
iid == vmIntrinsics::_linkToStatic) {
DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass
}
Label L_incompatible_class_change_error;
switch (iid) {
case vmIntrinsics::_linkToSpecial:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
}
__ movptr(rbx_method, member_vmtarget);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
}
__ movptr(rbx_method, member_vmtarget);
break;
case vmIntrinsics::_linkToVirtual:
{
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
}
Register temp2_index = temp2;
__ movptr(temp2_index, member_vmindex);
if (VerifyMethodHandles) {
Label L_index_ok;
__ cmpl(temp2_index, 0);
__ jcc(Assembler::greaterEqual, L_index_ok);
__ STOP("no virtual index");
__ BIND(L_index_ok);
}
__ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
break;
}
case vmIntrinsics::_linkToInterface:
{
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
}
Register temp3_intf = temp3;
__ load_heap_oop(temp3_intf, member_clazz);
load_klass_from_Class(_masm, temp3_intf);
__ verify_klass_ptr(temp3_intf);
Register rbx_index = rbx_method;
__ movptr(rbx_index, member_vmindex);
if (VerifyMethodHandles) {
Label L;
__ cmpl(rbx_index, 0);
__ jcc(Assembler::greaterEqual, L);
__ STOP("invalid vtable index for MH.invokeInterface");
__ bind(L);
}
__ lookup_interface_method(temp1_recv_klass, temp3_intf,
rbx_index, rbx_method,
temp2,
L_incompatible_class_change_error);
break;
}
default:
fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
break;
}
__ verify_method_ptr(rbx_method);
jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);
if (iid == vmIntrinsics::_linkToInterface) {
__ bind(L_incompatible_class_change_error);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
}
}
}
#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
oop mh,
intptr_t* saved_regs,
intptr_t* entry_sp) {
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
adaptername, mh_reg_name,
(void *)mh, entry_sp);
if (Verbose) {
tty->print_cr("Registers:");
const int saved_regs_count = RegisterImpl::number_of_registers;
for (int i = 0; i < saved_regs_count; i++) {
Register r = as_Register(i);
tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
if ((i + 1) % 4 == 0) {
tty->cr();
} else {
tty->print(", ");
}
}
tty->cr();
{
JavaThread* p = JavaThread::active();
ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
FrameValues values;
frame cur_frame = os::current_frame();
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
while (trace_calling_frame.fp() < saved_regs) {
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
intptr_t *dump_sp = trace_calling_frame.sender_sp();
intptr_t *dump_fp = trace_calling_frame.link();
bool walkable = has_mh; // whether the traced frame should be walkable
if (walkable) {
frame dump_frame = frame(dump_sp, dump_fp);
dump_frame.describe(values, 1);
} else {
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
values.describe(-1, dump_sp, "sp for #1");
}
values.describe(-1, entry_sp, "raw top of stack");
tty->print_cr("Stack layout:");
values.print(p);
}
if (has_mh && mh->is_oop()) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
java_lang_invoke_MethodHandle::form(mh)->print();
}
}
}
}
struct MethodHandleStubArguments {
const char* adaptername;
oopDesc* mh;
intptr_t* saved_regs;
intptr_t* entry_sp;
};
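// The generated trace code materializes this struct on the stack rather than
// marshalling four separate C arguments; the wrapper below unpacks it for the
// actual tracing routine.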
void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
trace_method_handle_stub(args->adaptername,
args->mh,
args->saved_regs,
args->entry_sp);
}
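// The pushes below build a MethodHandleStubArguments on the stack: fields are
// pushed in reverse declaration order (entry_sp, saved_regs, mh, adaptername)
// so that rsp ends up pointing at the struct when it is passed to the wrapper.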
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
__ enter();
__ andptr(rsp, -16); // align stack if needed for FPU state
__ pusha();
__ mov(rbx, rsp); // for retrieving saved_regs
__ increment(rsp, -2 * wordSize);
if (UseSSE >= 2) {
__ movdbl(Address(rsp, 0), xmm0);
} else if (UseSSE == 1) {
__ movflt(Address(rsp, 0), xmm0);
} else {
__ fst_d(Address(rsp, 0));
}
__ push(rbp); // entry_sp (with extra align space)
__ push(rbx); // pusha saved_regs
__ push(rcx); // mh
__ push(rcx); // slot for adaptername
__ movptr(Address(rsp, 0), (intptr_t) adaptername);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
__ increment(rsp, sizeof(MethodHandleStubArguments));
if (UseSSE >= 2) {
__ movdbl(xmm0, Address(rsp, 0));
} else if (UseSSE == 1) {
__ movflt(xmm0, Address(rsp, 0));
} else {
__ fld_d(Address(rsp, 0));
}
__ increment(rsp, 2 * wordSize);
__ popa();
__ leave();
BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
C:\hotspot-69087d08d473\src\cpu\x86\vm/methodHandles_x86.hpp
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg);
static void verify_klass(MacroAssembler* _masm,
Register obj, SystemDictionary::WKID klass_id,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) {
verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle),
"reference is a MH");
}
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry);
static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2,
bool for_compiler_entry);
static Register saved_last_sp_register() {
return LP64_ONLY(r13) NOT_LP64(rsi);
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/nativeInst_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
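// Record that code was patched at the given offset and invalidate the ICache
// word there; comments elsewhere in this file note that Opteron requires a
// flush after every write.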
void NativeInstruction::wrote(int offset) {
ICache::invalidate_word(addr_at(offset));
}
void NativeCall::verify() {
int inst = ubyte_at(0);
if (inst != instruction_code) {
tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
inst);
fatal("not a call disp32");
}
}
address NativeCall::destination() const {
return return_address() + displacement();
}
void NativeCall::print() {
tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
instruction_address(), destination());
}
void NativeCall::insert(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
*code_pos = instruction_code;
*((int32_t *)(code_pos + 1)) = (int32_t) disp;
ICache::invalidate_range(code_pos, instruction_size);
}
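// MT-safe patching of a call instruction (full 5-byte replacement). The first
// word is patched to a pair of jmp-to-self spin loops, then the last byte is
// written, and finally the first four bytes of the new instruction atomically
// replace the spin loops.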
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
assert (instr_addr != NULL, "illegal address for code patching");
NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
if (os::is_MP()) {
guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
}
// First patch a dummy jmp-to-self in place so free-running threads spin safely.
unsigned char patch[4];
assert(sizeof(patch)==sizeof(jint), "sanity check");
patch[0] = 0xEB; // jmp rel8
patch[1] = 0xFE; // jmp to self
patch[2] = 0xEB;
patch[3] = 0xFE;
*(jint*)instr_addr = *(jint *)patch;
n_call->wrote(0); // invalidate; Opteron requires a flush after every write
instr_addr[4] = code_buffer[4]; // patch 4th byte
n_call->wrote(4);
*(jint*)instr_addr = *(jint *)code_buffer; // patch bytes 0-3
n_call->wrote(0);
#ifdef ASSERT
for ( int i = 0; i < instruction_size; i++) {
address ptr = (address)((intptr_t)code_buffer + i);
int a_byte = (*ptr) & 0xFF;
assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
}
#endif
}
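// Similar to replace_mt_safe, but only the destination changes. Free-running
// threads must be able to execute the call at all times: if the displacement
// field lies within one cache line, an aligned 32-bit write is atomic enough;
// otherwise the first two bytes are patched to a jmp-to-self while the
// remaining displacement bytes are rewritten.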
void NativeCall::set_destination_mt_safe(address dest) {
debug_only(verify());
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
((uintptr_t)displacement_address() + 3) / cache_line_size;
guarantee(!os::is_MP() || is_aligned, "destination must be aligned");
if (is_aligned) {
set_destination(dest);
} else if ((uintptr_t)instruction_address() / cache_line_size ==
((uintptr_t)instruction_address()+1) / cache_line_size) {
intptr_t disp = dest - return_address();
#ifdef AMD64
guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
int call_opcode = instruction_address()[0];
// First patch a dummy jump in place:
{
u_char patch_jump[2];
patch_jump[0] = 0xEB; // jmp rel8
patch_jump[1] = 0xFE; // jmp to self
assert(sizeof(patch_jump)==sizeof(short), "sanity check");
*(short*)instruction_address() = *(short*)patch_jump;
}
wrote(0); // invalidate; Opteron requires a flush after every write
// Next, patch the last three bytes:
u_char patch_disp[5];
patch_disp[0] = call_opcode;
*(int32_t*)&patch_disp[1] = (int32_t)disp;
assert(sizeof(patch_disp)==instruction_size, "sanity check");
for (int i = sizeof(short); i < instruction_size; i++)
instruction_address()[i] = patch_disp[i];
wrote(sizeof(short));
// Finally, atomically overwrite the jmp-to-self with the real first two bytes:
*(short*)instruction_address() = *(short*)patch_disp;
wrote(0);
debug_only(verify());
guarantee(destination() == dest, "patch succeeded");
} else {
ShouldNotReachHere();
}
}
void NativeMovConstReg::verify() {
#ifdef AMD64
if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
(ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
print();
fatal("not a REX.W[B] mov reg64, imm64");
}
#else
u_char test_byte = *(u_char*)instruction_address();
u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}
void NativeMovConstReg::print() {
tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
instruction_address(), data());
}
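// Scan past any REX, operand-size, SSE (0xF2/0xF3) or VEX prefixes, and an
// optional xor lead-in, to find the offset of the actual opcode byte.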
int NativeMovRegMem::instruction_start() const {
int off = 0;
u_char instr_0 = ubyte_at(off);
if (instr_0 == instruction_VEX_prefix_2bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 2;
}
if (instr_0 == instruction_VEX_prefix_3bytes) {
assert((UseAVX > 0), "shouldn't have VEX prefix");
NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
return 3;
}
if (instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);
}
if (instr_0 == instruction_code_xor) {
off += 2;
instr_0 = ubyte_at(off);
}
if (instr_0 == instruction_operandsize_prefix ) { // 0x66
off++; // Not SSE instructions
instr_0 = ubyte_at(off);
}
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
off++;
instr_0 = ubyte_at(off);
}
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);
}
if (instr_0 == instruction_extended_prefix ) { // 0x0f
off++;
}
return off;
}
address NativeMovRegMem::instruction_address() const {
return addr_at(instruction_start());
}
address NativeMovRegMem::next_instruction_address() const {
address ret = instruction_address() + instruction_size;
u_char instr_0 = *(u_char*) instruction_address();
switch (instr_0) {
case instruction_operandsize_prefix:
fatal("should have skipped instruction_operandsize_prefix");
break;
case instruction_extended_prefix:
fatal("should have skipped instruction_extended_prefix");
break;
case instruction_code_mem2reg_movslq: // 0x63
case instruction_code_mem2reg_movzxb: // 0xB6
case instruction_code_mem2reg_movsxb: // 0xBE
case instruction_code_mem2reg_movzxw: // 0xB7
case instruction_code_mem2reg_movsxw: // 0xBF
case instruction_code_reg2mem: // 0x89 (q/l)
case instruction_code_mem2reg: // 0x8B (q/l)
case instruction_code_reg2memb: // 0x88
case instruction_code_mem2regb: // 0x8a
case instruction_code_float_s: // 0xd9 fld_s a
case instruction_code_float_d: // 0xdd fld_d a
case instruction_code_xmm_load: // 0x10
case instruction_code_xmm_store: // 0x11
case instruction_code_xmm_lpd: // 0x12
{
// If there is a SIB byte, the instruction is one byte longer than expected.
u_char mod_rm = *(u_char*)(instruction_address() + 1);
if ((mod_rm & 7) == 0x4) {
ret++;
}
}
break;
case instruction_code_xor:
fatal("should have skipped xor lead in");
break;
default:
fatal("not a NativeMovRegMem");
}
return ret;
}
int NativeMovRegMem::offset() const{
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
if ((mod_rm & 7) == 0x4) {
off++;
}
return int_at(off);
}
void NativeMovRegMem::set_offset(int x) {
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
if ((mod_rm & 7) == 0x4) {
off++;
}
set_int_at(off, x);
}
void NativeMovRegMem::verify() {
u_char test_byte = *(u_char*)instruction_address();
switch (test_byte) {
case instruction_code_reg2memb: // 0x88 movb a, r
case instruction_code_reg2mem: // 0x89 movl a, r (can be movq in 64bit)
case instruction_code_mem2regb: // 0x8a movb r, a
case instruction_code_mem2reg: // 0x8b movl r, a (can be movq in 64bit)
break;
case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
break;
case instruction_code_float_s: // 0xd9 fld_s a
case instruction_code_float_d: // 0xdd fld_d a
case instruction_code_xmm_load: // 0x10 movsd xmm, a
case instruction_code_xmm_store: // 0x11 movsd a, xmm
case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
break;
default:
fatal ("not a mov [reg+offs], reg instruction");
}
}
void NativeMovRegMem::print() {
tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset());
}
void NativeLoadAddress::verify() {
u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
if ( (test_byte == instruction_prefix_wide ||
test_byte == instruction_prefix_wide_extended) ) {
test_byte = *(u_char*)(instruction_address() + 1);
}
#endif // _LP64
if ( ! ((test_byte == lea_instruction_code)
LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
fatal ("not a lea reg, [reg+offs] instruction");
}
}
void NativeLoadAddress::print() {
tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
}
void NativeJump::verify() {
if (*(u_char*)instruction_address() != instruction_code) {
fatal("not a jump instruction");
}
}
void NativeJump::insert(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
*code_pos = instruction_code;
*((int32_t*)(code_pos + 1)) = (int32_t)disp;
ICache::invalidate_range(code_pos, instruction_size);
}
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
#ifdef AMD64
const int linesize = 64;
#else
const int linesize = 32;
#endif // AMD64
guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
"illegal address for code patching 2");
guarantee((uintptr_t) verified_entry / linesize ==
((uintptr_t) verified_entry + 4) / linesize,
"illegal address for code patching 3");
}
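// MT-safe insertion of a 5-byte jump at a method's verified entry point
// (used, for example, when an nmethod is made not-entrant). A 5-byte store is
// not atomic, so the jump is installed with the jmp-to-self protocol below.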
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
// The complete jump instruction (to be inserted) is built in code_buffer.
unsigned char code_buffer[5];
code_buffer[0] = instruction_code;
intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
*(int32_t*)(code_buffer + 1) = (int32_t)disp;
check_verified_entry_alignment(entry, verified_entry);
// Can't call nativeJump_at() because its assert demands that a jump already exists.
NativeJump* n_jump = (NativeJump*) verified_entry;
// First patch a dummy jmp-to-self in place.
unsigned char patch[4];
assert(sizeof(patch)==sizeof(int32_t), "sanity check");
patch[0] = 0xEB; // jmp rel8
patch[1] = 0xFE; // jmp to self
patch[2] = 0xEB;
patch[3] = 0xFE;
*(int32_t*)verified_entry = *(int32_t *)patch;
n_jump->wrote(0);
verified_entry[4] = code_buffer[4]; // patch 5th byte (from the jump instruction)
n_jump->wrote(4);
*(int32_t*)verified_entry = *(int32_t *)code_buffer; // patch bytes 0-3
n_jump->wrote(0); // invalidate; Opteron requires a flush after every write
}
void NativePopReg::insert(address code_pos, Register reg) {
assert(reg->encoding() < 8, "no space for REX");
assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
*code_pos = (u_char)(instruction_code | reg->encoding()); // pop r32 is 0x58 + reg
ICache::invalidate_range(code_pos, instruction_size);
}
void NativeIllegalInstruction::insert(address code_pos) {
assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
*(short *)code_pos = instruction_code; // ud2 (0x0F 0x0B)
ICache::invalidate_range(code_pos, instruction_size);
}
void NativeGeneralJump::verify() {
assert(((NativeInstruction *)this)->is_jump() ||
((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
*code_pos = unconditional_long_jump;
*((int32_t *)(code_pos + 1)) = (int32_t) disp;
ICache::invalidate_range(code_pos, instruction_size);
}
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
assert (instr_addr != NULL, "illegal address for code patching (4)");
NativeGeneralJump* n_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
// First patch a dummy jmp-to-self in place.
unsigned char patch[4];
assert(sizeof(patch)==sizeof(int32_t), "sanity check");
patch[0] = 0xEB; // jmp rel8
patch[1] = 0xFE; // jmp to self
patch[2] = 0xEB;
patch[3] = 0xFE;
*(int32_t*)instr_addr = *(int32_t *)patch;
n_jump->wrote(0); // invalidate; Opteron requires a flush after every write
instr_addr[4] = code_buffer[4]; // patch 4th byte
n_jump->wrote(4);
*(int32_t*)instr_addr = *(int32_t *)code_buffer; // patch bytes 0-3
n_jump->wrote(0);
#ifdef ASSERT
for ( int i = 0; i < instruction_size; i++) {
address ptr = (address)((intptr_t)code_buffer + i);
int a_byte = (*ptr) & 0xFF;
assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
}
#endif
}
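// Decode either form of jump: 0xE9 rel32, 0xEB rel8, or a 0x0F-prefixed long
// conditional jump whose rel32 operand sits at offset 2.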
address NativeGeneralJump::jump_destination() const {
int op_code = ubyte_at(0);
bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
int offset = (op_code == 0x0F) ? 2 : 1;
int length = offset + ((is_rel32off) ? 4 : 1);
if (is_rel32off)
return addr_at(0) + length + int_at(offset);
else
return addr_at(0) + length + sbyte_at(offset);
}
bool NativeInstruction::is_dtrace_trap() {
return (*(int32_t*)this & 0xff) == 0xcc;
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/nativeInst_x86.hpp
#ifndef CPU_X86_VM_NATIVEINST_X86_HPP
#define CPU_X86_VM_NATIVEINST_X86_HPP
#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
friend class Relocation;
public:
enum Intel_specific_constants {
nop_instruction_code = 0x90,
nop_instruction_size = 1
};
bool is_nop() { return ubyte_at(0) == nop_instruction_code; }
bool is_dtrace_trap();
inline bool is_call();
inline bool is_illegal();
inline bool is_return();
inline bool is_jump();
inline bool is_cond_jump();
inline bool is_safepoint_poll();
inline bool is_mov_literal64();
protected:
address addr_at(int offset) const { return address(this) + offset; }
s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }
jint int_at(int offset) const { return *(jint*) addr_at(offset); }
intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); }
oop oop_at (int offset) const { return *(oop*) addr_at(offset); }
void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; wrote(offset); }
void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; wrote(offset); }
void set_ptr_at (int offset, intptr_t ptr) { *(intptr_t*) addr_at(offset) = ptr; wrote(offset); }
void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; wrote(offset); }
void wrote(int offset);
public:
static void test() {} // override for testing
inline friend NativeInstruction* nativeInstruction_at(address address);
};
inline NativeInstruction* nativeInstruction_at(address address) {
NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
#endif
return inst;
}
inline NativeCall* nativeCall_at(address address);
class NativeCall: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0xE8,
instruction_size = 5,
instruction_offset = 0,
displacement_offset = 1,
return_address_offset = 5
};
enum { cache_line_size = BytesPerWord }; // conservative estimate!
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const { return addr_at(return_address_offset); }
int displacement() const { return (jint) int_at(displacement_offset); }
address displacement_address() const { return addr_at(displacement_offset); }
address return_address() const { return addr_at(return_address_offset); }
address destination() const;
void set_destination(address dest) {
#ifdef AMD64
assert((labs((intptr_t) dest - (intptr_t) return_address()) &
0xFFFFFFFF00000000) == 0,
"must be 32bit offset");
#endif // AMD64
set_int_at(displacement_offset, dest - return_address());
}
void set_destination_mt_safe(address dest);
void verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
void verify();
void print();
inline friend NativeCall* nativeCall_at(address address);
inline friend NativeCall* nativeCall_before(address return_address);
static bool is_call_at(address instr) {
return ((*instr) & 0xFF) == NativeCall::instruction_code;
}
static bool is_call_before(address return_address) {
return is_call_at(return_address - NativeCall::return_address_offset);
}
static bool is_call_to(address instr, address target) {
return nativeInstruction_at(instr)->is_call() &&
nativeCall_at(instr)->destination() == target;
}
static void insert(address code_pos, address entry);
static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address address) {
NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
call->verify();
#endif
return call;
}
inline NativeCall* nativeCall_before(address return_address) {
NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
call->verify();
#endif
return call;
}
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
static const bool has_rex = true;
static const int rex_size = 1;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
instruction_code = 0xB8,
instruction_size = 1 + rex_size + wordSize,
instruction_offset = 0,
data_offset = 1 + rex_size,
next_instruction_offset = instruction_size,
register_mask = 0x07
};
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const { return addr_at(next_instruction_offset); }
intptr_t data() const { return ptr_at(data_offset); }
void set_data(intptr_t x) { set_ptr_at(data_offset, x); }
void verify();
void print();
static void test() {}
inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
class NativeMovConstRegPatching: public NativeMovConstReg {
private:
friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
class NativeMovRegMem: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_prefix_wide_lo = Assembler::REX,
instruction_prefix_wide_hi = Assembler::REX_WRXB,
instruction_code_xor = 0x33,
instruction_extended_prefix = 0x0F,
instruction_code_mem2reg_movslq = 0x63,
instruction_code_mem2reg_movzxb = 0xB6,
instruction_code_mem2reg_movsxb = 0xBE,
instruction_code_mem2reg_movzxw = 0xB7,
instruction_code_mem2reg_movsxw = 0xBF,
instruction_operandsize_prefix = 0x66,
instruction_code_reg2mem = 0x89,
instruction_code_mem2reg = 0x8b,
instruction_code_reg2memb = 0x88,
instruction_code_mem2regb = 0x8a,
instruction_code_float_s = 0xd9,
instruction_code_float_d = 0xdd,
instruction_code_long_volatile = 0xdf,
instruction_code_xmm_ss_prefix = 0xf3,
instruction_code_xmm_sd_prefix = 0xf2,
instruction_code_xmm_code = 0x0f,
instruction_code_xmm_load = 0x10,
instruction_code_xmm_store = 0x11,
instruction_code_xmm_lpd = 0x12,
instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes,
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_size = 4,
instruction_offset = 0,
data_offset = 2,
next_instruction_offset = 4
};
int instruction_start() const;
address instruction_address() const;
address next_instruction_address() const;
int offset() const;
void set_offset(int x);
void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }
void verify();
void print ();
static void test() {}
private:
inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
inline NativeMovRegMem* nativeMovRegMem_at (address address) {
NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
class NativeMovRegMemPatching: public NativeMovRegMem {
private:
friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
static const bool has_rex = true;
static const int rex_size = 1;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
instruction_prefix_wide = Assembler::REX_W,
instruction_prefix_wide_extended = Assembler::REX_WB,
lea_instruction_code = 0x8D,
mov64_instruction_code = 0xB8
};
void verify();
void print ();
static void test() {}
private:
friend NativeLoadAddress* nativeLoadAddress_at (address address) {
NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
test->verify();
#endif
return test;
}
};
class NativeJump: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0xe9,
instruction_size = 5,
instruction_offset = 0,
data_offset = 1,
next_instruction_offset = 5
};
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const { return addr_at(next_instruction_offset); }
address jump_destination() const {
address dest = (int_at(data_offset)+next_instruction_address());
dest = (dest == (address) this) ? (address) -1 : dest;
return dest;
}
void set_jump_destination(address dest) {
intptr_t val = dest - next_instruction_address();
if (dest == (address) -1) {
val = -5; // jump to self
}
#ifdef AMD64
assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
set_int_at(data_offset, (jint)val);
}
inline friend NativeJump* nativeJump_at(address address);
void verify();
static void test() {}
static void insert(address code_pos, address entry);
static void check_verified_entry_alignment(address entry, address verified_entry);
static void patch_verified_entry(address entry, address verified_entry, address dest);
};
inline NativeJump* nativeJump_at(address address) {
NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
jump->verify();
#endif
return jump;
}
class NativeGeneralJump: public NativeInstruction {
public:
enum Intel_specific_constants {
unconditional_long_jump = 0xe9,
unconditional_short_jump = 0xeb,
instruction_size = 5
};
address instruction_address() const { return addr_at(0); }
address jump_destination() const;
inline friend NativeGeneralJump* nativeGeneralJump_at(address address);
static void insert_unconditional(address code_pos, address entry);
static void replace_mt_safe(address instr_addr, address code_buffer);
void verify();
};
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
NativeGeneralJump* jump = (NativeGeneralJump*)(address);
debug_only(jump->verify();)
return jump;
}
class NativePopReg : public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0x58,
instruction_size = 1,
instruction_offset = 0,
data_offset = 1,
next_instruction_offset = 1
};
static void insert(address code_pos, Register reg);
};
class NativeIllegalInstruction: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0x0B0F, // Real byte order is: 0x0F, 0x0B
instruction_size = 2,
instruction_offset = 0,
next_instruction_offset = 2
};
static void insert(address code_pos);
};
class NativeReturn: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0xC3,
instruction_size = 1,
instruction_offset = 0,
next_instruction_offset = 1
};
};
class NativeReturnX: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_code = 0xC2,
instruction_size = 2,
instruction_offset = 0,
next_instruction_offset = 2
};
};
class NativeTstRegMem: public NativeInstruction {
public:
enum Intel_specific_constants {
instruction_rex_prefix_mask = 0xF0,
instruction_rex_prefix = Assembler::REX,
instruction_code_memXregl = 0x85,
modrm_mask = 0x38, // select reg from the ModRM byte
modrm_reg = 0x00 // rax
};
};
inline bool NativeInstruction::is_illegal() { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call() { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_return() { return ubyte_at(0) == NativeReturn::instruction_code ||
ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump() { return ubyte_at(0) == NativeJump::instruction_code ||
ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
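// A safepoint poll is a test of rax against the polling page, e.g.
// test eax, [polling_page] (opcode 0x85 /0, possibly REX-prefixed on AMD64).
// The far-polling-page case on AMD64 only checks the instruction shape; the
// near case also verifies that the RIP-relative disp32 targets a poll address.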
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
if (Assembler::is_polling_page_far()) {
if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
(ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
(ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
(ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
return true;
} else {
return false;
}
} else {
if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
ubyte_at(1) == 0x05) { // 00 rax 101
address fault = addr_at(6) + int_at(2);
return os::is_poll_address(fault);
} else {
return false;
}
}
#else
return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
(ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
(os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
(ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
return false;
#endif // AMD64
}
#endif // CPU_X86_VM_NATIVEINST_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/rdtsc_x86.cpp
#include "precompiled.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "os_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "os_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "os_solaris_x86.inline.hpp"
#endif
#include "rdtsc_x86.hpp"
#include "runtime/thread.inline.hpp"
#include "vm_version_ext_x86.hpp"
#include "runtime/os.hpp"
static jlong _epoch = 0;
static bool rdtsc_elapsed_counter_enabled = false;
static jlong tsc_frequency = 0;
static jlong set_epoch() {
assert(0 == _epoch, "invariant");
_epoch = os::rdtsc();
return _epoch;
}
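// Calibrate rdtsc against os::elapsed_counter(): sleep about one millisecond a
// few times, averaging both the interval deltas and the running totals, so the
// caller can derive an OS-tick-to-TSC conversion factor.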
static void do_time_measurements(volatile jlong& time_base,
volatile jlong& time_fast,
volatile jlong& time_base_elapsed,
volatile jlong& time_fast_elapsed) {
static const unsigned int FT_SLEEP_MILLISECS = 1;
const unsigned int loopcount = 3;
volatile jlong start = 0;
volatile jlong fstart = 0;
volatile jlong end = 0;
volatile jlong fend = 0;
for (unsigned int times = 0; times < loopcount; times++) {
start = os::elapsed_counter();
OrderAccess::fence();
fstart = os::rdtsc();
os::sleep(Thread::current(), FT_SLEEP_MILLISECS, true);
end = os::elapsed_counter();
OrderAccess::fence();
fend = os::rdtsc();
time_base += end - start;
time_fast += fend - fstart;
time_base_elapsed += end;
time_fast_elapsed += (fend - _epoch);
}
time_base /= loopcount;
time_fast /= loopcount;
time_base_elapsed /= loopcount;
time_fast_elapsed /= loopcount;
}
static jlong initialize_frequency() {
assert(0 == tsc_frequency, "invariant");
assert(0 == _epoch, "invariant");
const jlong initial_counter = set_epoch();
if (initial_counter == 0) {
return 0;
}
static double os_freq = (double)os::elapsed_frequency();
assert(os_freq > 0, "os_elapsed frequency corruption!");
double tsc_freq = .0;
double os_to_tsc_conv_factor = 1.0;
if (VM_Version_Ext::supports_tscinv_ext()) {
tsc_freq = (double)VM_Version_Ext::maximum_qualified_cpu_frequency();
os_to_tsc_conv_factor = tsc_freq / os_freq;
} else {
volatile jlong time_base = 0;
volatile jlong time_fast = 0;
volatile jlong time_base_elapsed = 0;
volatile jlong time_fast_elapsed = 0;
do_time_measurements(time_base, time_fast, time_base_elapsed, time_fast_elapsed);
if (time_fast == 0 || time_base == 0) {
return 0;
}
os_to_tsc_conv_factor = (double)time_fast / (double)time_base;
if (os_to_tsc_conv_factor > 1) {
tsc_freq = os_to_tsc_conv_factor * os_freq;
}
}
if ((tsc_freq < 0) || (tsc_freq > 0 && tsc_freq <= os_freq) || (os_to_tsc_conv_factor <= 1)) {
tsc_freq = .0;
}
return (jlong)tsc_freq;
}
static bool initialize_elapsed_counter() {
tsc_frequency = initialize_frequency();
return tsc_frequency != 0 && _epoch != 0;
}
static bool ergonomics() {
const bool invtsc_support = Rdtsc::is_supported();
if (FLAG_IS_DEFAULT(UseFastUnorderedTimeStamps) && invtsc_support) {
FLAG_SET_ERGO(bool, UseFastUnorderedTimeStamps, true);
}
bool ft_enabled = UseFastUnorderedTimeStamps && invtsc_support;
if (!ft_enabled) {
if (UseFastUnorderedTimeStamps && VM_Version::supports_tsc()) {
warning("\nThe hardware does not support invariant tsc (INVTSC) register and/or cannot guarantee tsc synchronization between sockets at startup.\n"\
"Values returned via rdtsc() are not guaranteed to be accurate, esp. when comparing values from cross sockets reads. Enabling UseFastUnorderedTimeStamps on non-invariant tsc hardware should be considered experimental.\n");
ft_enabled = true;
}
}
if (!ft_enabled) {
if (UseFastUnorderedTimeStamps && !VM_Version::supports_tsc()) {
warning("Ignoring UseFastUnorderedTimeStamps, hardware does not support normal tsc");
}
}
return ft_enabled;
}
bool Rdtsc::is_supported() {
return VM_Version_Ext::supports_tscinv_ext();
}
bool Rdtsc::is_elapsed_counter_enabled() {
return rdtsc_elapsed_counter_enabled;
}
jlong Rdtsc::frequency() {
return tsc_frequency;
}
jlong Rdtsc::elapsed_counter() {
return os::rdtsc() - _epoch;
}
jlong Rdtsc::epoch() {
return _epoch;
}
jlong Rdtsc::raw() {
return os::rdtsc();
}
bool Rdtsc::initialize() {
static bool initialized = false;
if (!initialized) {
assert(!rdtsc_elapsed_counter_enabled, "invariant");
VM_Version_Ext::initialize();
assert(0 == tsc_frequency, "invariant");
assert(0 == _epoch, "invariant");
bool result = initialize_elapsed_counter(); // init hw
if (result) {
result = ergonomics(); // check logical state
}
rdtsc_elapsed_counter_enabled = result;
initialized = true;
}
return rdtsc_elapsed_counter_enabled;
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/rdtsc_x86.hpp
#ifndef CPU_X86_VM_RDTSC_X86_HPP
#define CPU_X86_VM_RDTSC_X86_HPP
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"
class Rdtsc : AllStatic {
public:
static jlong elapsed_counter(); // provides quick time stamps
static jlong frequency(); // tsc register
static bool is_supported(); // InvariantTSC
static jlong raw(); // direct rdtsc() access
static bool is_elapsed_counter_enabled(); // turn off with -XX:-UseFastUnorderedTimeStamps
static jlong epoch();
static bool initialize();
};
#endif // CPU_X86_VM_RDTSC_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/registerMap_x86.hpp
#ifndef CPU_X86_VM_REGISTERMAP_X86_HPP
#define CPU_X86_VM_REGISTERMAP_X86_HPP
friend class frame;
private:
address pd_location(VMReg reg) const {return NULL;}
void pd_clear() {}
void pd_initialize() {}
void pd_initialize_from(const RegisterMap* map) {}
#endif // CPU_X86_VM_REGISTERMAP_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/register_definitions_x86.cpp
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "register_x86.hpp"
#ifdef TARGET_ARCH_x86
# include "interp_masm_x86.hpp"
#endif
REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(Register, rax);
REGISTER_DEFINITION(Register, rcx);
REGISTER_DEFINITION(Register, rdx);
REGISTER_DEFINITION(Register, rbx);
REGISTER_DEFINITION(Register, rsp);
REGISTER_DEFINITION(Register, rbp);
REGISTER_DEFINITION(Register, rsi);
REGISTER_DEFINITION(Register, rdi);
#ifdef AMD64
REGISTER_DEFINITION(Register, r8);
REGISTER_DEFINITION(Register, r9);
REGISTER_DEFINITION(Register, r10);
REGISTER_DEFINITION(Register, r11);
REGISTER_DEFINITION(Register, r12);
REGISTER_DEFINITION(Register, r13);
REGISTER_DEFINITION(Register, r14);
REGISTER_DEFINITION(Register, r15);
#endif // AMD64
REGISTER_DEFINITION(XMMRegister, xnoreg);
REGISTER_DEFINITION(XMMRegister, xmm0 );
REGISTER_DEFINITION(XMMRegister, xmm1 );
REGISTER_DEFINITION(XMMRegister, xmm2 );
REGISTER_DEFINITION(XMMRegister, xmm3 );
REGISTER_DEFINITION(XMMRegister, xmm4 );
REGISTER_DEFINITION(XMMRegister, xmm5 );
REGISTER_DEFINITION(XMMRegister, xmm6 );
REGISTER_DEFINITION(XMMRegister, xmm7 );
#ifdef AMD64
REGISTER_DEFINITION(XMMRegister, xmm8);
REGISTER_DEFINITION(XMMRegister, xmm9);
REGISTER_DEFINITION(XMMRegister, xmm10);
REGISTER_DEFINITION(XMMRegister, xmm11);
REGISTER_DEFINITION(XMMRegister, xmm12);
REGISTER_DEFINITION(XMMRegister, xmm13);
REGISTER_DEFINITION(XMMRegister, xmm14);
REGISTER_DEFINITION(XMMRegister, xmm15);
REGISTER_DEFINITION(Register, c_rarg0);
REGISTER_DEFINITION(Register, c_rarg1);
REGISTER_DEFINITION(Register, c_rarg2);
REGISTER_DEFINITION(Register, c_rarg3);
REGISTER_DEFINITION(XMMRegister, c_farg0);
REGISTER_DEFINITION(XMMRegister, c_farg1);
REGISTER_DEFINITION(XMMRegister, c_farg2);
REGISTER_DEFINITION(XMMRegister, c_farg3);
#ifndef _WIN64
REGISTER_DEFINITION(Register, c_rarg4);
REGISTER_DEFINITION(Register, c_rarg5);
REGISTER_DEFINITION(XMMRegister, c_farg4);
REGISTER_DEFINITION(XMMRegister, c_farg5);
REGISTER_DEFINITION(XMMRegister, c_farg6);
REGISTER_DEFINITION(XMMRegister, c_farg7);
#endif /* _WIN64 */
REGISTER_DEFINITION(Register, j_rarg0);
REGISTER_DEFINITION(Register, j_rarg1);
REGISTER_DEFINITION(Register, j_rarg2);
REGISTER_DEFINITION(Register, j_rarg3);
REGISTER_DEFINITION(Register, j_rarg4);
REGISTER_DEFINITION(Register, j_rarg5);
REGISTER_DEFINITION(XMMRegister, j_farg0);
REGISTER_DEFINITION(XMMRegister, j_farg1);
REGISTER_DEFINITION(XMMRegister, j_farg2);
REGISTER_DEFINITION(XMMRegister, j_farg3);
REGISTER_DEFINITION(XMMRegister, j_farg4);
REGISTER_DEFINITION(XMMRegister, j_farg5);
REGISTER_DEFINITION(XMMRegister, j_farg6);
REGISTER_DEFINITION(XMMRegister, j_farg7);
REGISTER_DEFINITION(Register, rscratch1);
REGISTER_DEFINITION(Register, rscratch2);
REGISTER_DEFINITION(Register, r12_heapbase);
REGISTER_DEFINITION(Register, r15_thread);
#endif // AMD64
REGISTER_DEFINITION(MMXRegister, mnoreg );
REGISTER_DEFINITION(MMXRegister, mmx0 );
REGISTER_DEFINITION(MMXRegister, mmx1 );
REGISTER_DEFINITION(MMXRegister, mmx2 );
REGISTER_DEFINITION(MMXRegister, mmx3 );
REGISTER_DEFINITION(MMXRegister, mmx4 );
REGISTER_DEFINITION(MMXRegister, mmx5 );
REGISTER_DEFINITION(MMXRegister, mmx6 );
REGISTER_DEFINITION(MMXRegister, mmx7 );
REGISTER_DEFINITION(Register, rbp_mh_SP_save);
C:\hotspot-69087d08d473\src\cpu\x86\vm/register_x86.cpp
#include "precompiled.hpp"
#include "register_x86.hpp"
#ifndef AMD64
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers;
#else
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1;
#endif // AMD64
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
2 * FloatRegisterImpl::number_of_registers;
const int ConcreteRegisterImpl::max_xmm = ConcreteRegisterImpl::max_fpr +
8 * XMMRegisterImpl::number_of_registers;
const char* RegisterImpl::name() const {
const char* names[number_of_registers] = {
#ifndef AMD64
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
#else
"rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif // AMD64
};
return is_valid() ? names[encoding()] : "noreg";
}
const char* FloatRegisterImpl::name() const {
const char* names[number_of_registers] = {
"st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7"
};
return is_valid() ? names[encoding()] : "noreg";
}
const char* XMMRegisterImpl::name() const {
const char* names[number_of_registers] = {
"xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7"
#ifdef AMD64
,"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
#endif // AMD64
};
return is_valid() ? names[encoding()] : "xnoreg";
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/register_x86.hpp
#ifndef CPU_X86_VM_REGISTER_X86_HPP
#define CPU_X86_VM_REGISTER_X86_HPP
#include "asm/register.hpp"
#include "vm_version_x86.hpp"
class VMRegImpl;
typedef VMRegImpl* VMReg;
class RegisterImpl;
typedef RegisterImpl* Register;
inline Register as_Register(int encoding) {
return (Register)(intptr_t) encoding;
}
class RegisterImpl: public AbstractRegisterImpl {
public:
enum {
#ifndef AMD64
number_of_registers = 8,
number_of_byte_registers = 4
#else
number_of_registers = 16,
number_of_byte_registers = 16
#endif // AMD64
};
Register successor() const { return as_Register(encoding() + 1); }
inline friend Register as_Register(int encoding);
VMReg as_VMReg();
int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; }
bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; }
bool has_byte_register() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_byte_registers; }
const char* name() const;
};
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
CONSTANT_REGISTER_DECLARATION(Register, rax, (0));
CONSTANT_REGISTER_DECLARATION(Register, rcx, (1));
CONSTANT_REGISTER_DECLARATION(Register, rdx, (2));
CONSTANT_REGISTER_DECLARATION(Register, rbx, (3));
CONSTANT_REGISTER_DECLARATION(Register, rsp, (4));
CONSTANT_REGISTER_DECLARATION(Register, rbp, (5));
CONSTANT_REGISTER_DECLARATION(Register, rsi, (6));
CONSTANT_REGISTER_DECLARATION(Register, rdi, (7));
#ifdef AMD64
CONSTANT_REGISTER_DECLARATION(Register, r8, (8));
CONSTANT_REGISTER_DECLARATION(Register, r9, (9));
CONSTANT_REGISTER_DECLARATION(Register, r10, (10));
CONSTANT_REGISTER_DECLARATION(Register, r11, (11));
CONSTANT_REGISTER_DECLARATION(Register, r12, (12));
CONSTANT_REGISTER_DECLARATION(Register, r13, (13));
CONSTANT_REGISTER_DECLARATION(Register, r14, (14));
CONSTANT_REGISTER_DECLARATION(Register, r15, (15));
#endif // AMD64
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;
inline FloatRegister as_FloatRegister(int encoding) {
return (FloatRegister)(intptr_t) encoding;
}
class FloatRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 8
};
inline friend FloatRegister as_FloatRegister(int encoding);
VMReg as_VMReg();
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; }
bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; }
const char* name() const;
};
class XMMRegisterImpl;
typedef XMMRegisterImpl* XMMRegister;
class MMXRegisterImpl;
typedef MMXRegisterImpl* MMXRegister;
inline XMMRegister as_XMMRegister(int encoding) {
return (XMMRegister)(intptr_t)encoding;
}
inline MMXRegister as_MMXRegister(int encoding) {
return (MMXRegister)(intptr_t)encoding;
}
class XMMRegisterImpl: public AbstractRegisterImpl {
public:
enum {
#ifndef AMD64
number_of_registers = 8
#else
number_of_registers = 16
#endif // AMD64
};
friend XMMRegister as_XMMRegister(int encoding);
VMReg as_VMReg();
XMMRegister successor() const { return as_XMMRegister(encoding() + 1); }
int encoding() const { assert(is_valid(), err_msg("invalid register (%d)", (int)(intptr_t)this )); return (intptr_t)this; }
bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; }
const char* name() const;
};
CONSTANT_REGISTER_DECLARATION(XMMRegister, xnoreg , (-1));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm0 , ( 0));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm1 , ( 1));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm2 , ( 2));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm3 , ( 3));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm4 , ( 4));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm5 , ( 5));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm6 , ( 6));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm7 , ( 7));
#ifdef AMD64
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm8, (8));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm9, (9));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm10, (10));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm11, (11));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm12, (12));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm13, (13));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm14, (14));
CONSTANT_REGISTER_DECLARATION(XMMRegister, xmm15, (15));
#endif // AMD64
CONSTANT_REGISTER_DECLARATION(MMXRegister, mnoreg , (-1));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx0 , ( 0));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx1 , ( 1));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx2 , ( 2));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx3 , ( 3));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx4 , ( 4));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx5 , ( 5));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx6 , ( 6));
CONSTANT_REGISTER_DECLARATION(MMXRegister, mmx7 , ( 7));
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
number_of_registers = RegisterImpl::number_of_registers +
#ifdef AMD64
RegisterImpl::number_of_registers + // "H" half of a 64bit register
#endif // AMD64
2 * FloatRegisterImpl::number_of_registers +
8 * XMMRegisterImpl::number_of_registers +
1 // eflags
};
static const int max_gpr;
static const int max_fpr;
static const int max_xmm;
};
#endif // CPU_X86_VM_REGISTER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/relocInfo_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
#ifdef AMD64
x += o;
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop
assert(which == Assembler::disp32_operand ||
which == Assembler::narrow_oop_operand ||
which == Assembler::imm_operand, "format unpacks ok");
if (which == Assembler::imm_operand) {
if (verify_only) {
guarantee(*pd_address_in_code() == x, "instructions must match");
} else {
*pd_address_in_code() = x;
}
} else if (which == Assembler::narrow_oop_operand) {
address disp = Assembler::locate_operand(addr(), which);
// Both compressed oops and compressed klasses look the same in the instruction stream.
if (Universe::heap()->is_in_reserved((oop)x)) {
if (verify_only) {
guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
} else {
*(uint32_t*) disp = oopDesc::encode_heap_oop((oop)x);
}
} else {
if (verify_only) {
guarantee(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
} else {
*(uint32_t*) disp = Klass::encode_klass((Klass*)x);
}
}
} else {
address ip = addr();
address disp = Assembler::locate_operand(ip, which);
address next_ip = Assembler::locate_next_instruction(ip);
if (verify_only) {
guarantee(*(int32_t*) disp == (x - next_ip), "instructions must match");
} else {
*(int32_t*) disp = x - next_ip;
}
}
#else
if (verify_only) {
guarantee(*pd_address_in_code() == (x + o), "instructions must match");
} else {
*pd_address_in_code() = x + o;
}
#endif // AMD64
}
address Relocation::pd_call_destination(address orig_addr) {
intptr_t adj = 0;
if (orig_addr != NULL) {
adj = -( addr() - orig_addr );
}
NativeInstruction* ni = nativeInstruction_at(addr());
if (ni->is_call()) {
return nativeCall_at(addr())->destination() + adj;
} else if (ni->is_jump()) {
return nativeJump_at(addr())->jump_destination() + adj;
} else if (ni->is_cond_jump()) {
return nativeGeneralJump_at(addr())->jump_destination() + adj;
} else if (ni->is_mov_literal64()) {
return (address) ((NativeMovConstReg*)ni)->data();
} else {
ShouldNotReachHere();
return NULL;
}
}
void Relocation::pd_set_call_destination(address x) {
NativeInstruction* ni = nativeInstruction_at(addr());
if (ni->is_call()) {
nativeCall_at(addr())->set_destination(x);
} else if (ni->is_jump()) {
NativeJump* nj = nativeJump_at(addr());
if (nj->jump_destination() == (address) -1) {
x = addr(); // jump to self
}
nj->set_jump_destination(x);
} else if (ni->is_cond_jump()) {
// Kludge: adjust the rel32 operand in place until conditional jumps get a proper setter.
address old_dest = nativeGeneralJump_at(addr())->jump_destination();
address disp = Assembler::locate_operand(addr(), Assembler::call32_operand);
*(jint*)disp += (x - old_dest);
} else if (ni->is_mov_literal64()) {
((NativeMovConstReg*)ni)->set_data((intptr_t)x);
} else {
ShouldNotReachHere();
}
}
address* Relocation::pd_address_in_code() {
assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
#ifdef AMD64
assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand ||
which == Assembler::imm_operand, "format unpacks ok");
guarantee(which == Assembler::imm_operand, "must be immediate operand");
#else
assert(which == Assembler::disp32_operand || which == Assembler::imm_operand, "format unpacks ok");
#endif // AMD64
return (address*) Assembler::locate_operand(addr(), which);
}
address Relocation::pd_get_address_from_code() {
#ifdef AMD64
assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand ||
which == Assembler::imm_operand, "format unpacks ok");
if (which != Assembler::imm_operand) {
address ip = addr();
address disp = Assembler::locate_operand(ip, which);
address next_ip = Assembler::locate_next_instruction(ip);
address a = next_ip + *(int32_t*) disp;
return a;
}
#endif // AMD64
return *pd_address_in_code();
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
#ifdef _LP64
if (!Assembler::is_polling_page_far()) {
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format();
// This format is imm, but it is really disp32.
which = Assembler::disp32_operand;
address orig_addr = old_addr_for(addr(), src, dest);
NativeInstruction* oni = nativeInstruction_at(orig_addr);
int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
// The poll is load-dependent, so recompute the load address relative to the moved code.
intptr_t poll_addr = (intptr_t)oni + *orig_disp;
NativeInstruction* ni = nativeInstruction_at(addr());
intptr_t new_disp = poll_addr - (intptr_t) ni;
int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
*disp = (int32_t)new_disp;
}
#endif // _LP64
}
void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
#ifdef _LP64
if (!Assembler::is_polling_page_far()) {
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format();
// This format is imm, but it is really disp32.
which = Assembler::disp32_operand;
address orig_addr = old_addr_for(addr(), src, dest);
NativeInstruction* oni = nativeInstruction_at(orig_addr);
int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
// The poll is load-dependent, so recompute the load address relative to the moved code.
intptr_t poll_addr = (intptr_t)oni + *orig_disp;
NativeInstruction* ni = nativeInstruction_at(addr());
intptr_t new_disp = poll_addr - (intptr_t) ni;
int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
*disp = (int32_t)new_disp;
}
#endif // _LP64
}
void metadata_Relocation::pd_fix_value(address x) {
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/relocInfo_x86.hpp
#ifndef CPU_X86_VM_RELOCINFO_X86_HPP
#define CPU_X86_VM_RELOCINFO_X86_HPP
private:
enum {
offset_unit = 1,
#ifndef AMD64
format_width = 1
#else
format_width = 2
#endif
};
#endif // CPU_X86_VM_RELOCINFO_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/rtmLocking.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/rtmLocking.hpp"
uintx RTMLockingCounters::_calculation_flag = 0;
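// One-shot periodic task: after RTMLockingCalculationDelay milliseconds it
// raises _calculation_flag, which lets the generated RTM locking code begin
// computing abort ratios, and then deletes itself.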
class RTMLockingCalculationTask : public PeriodicTask {
public:
RTMLockingCalculationTask(size_t interval_time) : PeriodicTask(interval_time){ }
virtual void task() {
RTMLockingCounters::_calculation_flag = 1;
delete this;
}
};
void RTMLockingCounters::init() {
if (UseRTMLocking && RTMLockingCalculationDelay > 0) {
RTMLockingCalculationTask* task = new RTMLockingCalculationTask(RTMLockingCalculationDelay);
task->enroll();
} else {
_calculation_flag = 1;
}
}
void RTMLockingCounters::print_on(outputStream* st) {
tty->print_cr("# rtm locks total (estimated): " UINTX_FORMAT, _total_count * RTMTotalCountIncrRate);
tty->print_cr("# rtm lock aborts : " UINTX_FORMAT, _abort_count);
for (int i = 0; i < ABORT_STATUS_LIMIT; i++) {
tty->print_cr("# rtm lock aborts %d: " UINTX_FORMAT, i, _abortX_count[i]);
}
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/runtime_x86_32.cpp
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_x86.inline.hpp"
#endif
#define __ masm->
void OptoRuntime::generate_exception_blob() {
enum layout {
thread_off, // last_java_sp
rbp_off,
return_off, // slot for return address
framesize
};
ResourceMark rm;
CodeBuffer buffer("exception_blob", 512, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
OopMapSet *oop_maps = new OopMapSet();
address start = __ pc();
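// On entry rax holds the exception oop and rdx the exception pc; the pc is pushed first so the stack walker sees it as a return address.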
__ push(rdx);
__ subptr(rsp, return_off * wordSize); // Prolog!
__ movptr(Address(rsp,rbp_off *wordSize), rbp);
__ get_thread(rcx);
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), rdx);
__ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
__ set_last_Java_frame(rcx, noreg, noreg, NULL);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
oop_maps->add_gc_map( __ pc() - start, new OopMap( framesize, 0 ));
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false);
__ movptr(rbp, Address(rsp, rbp_off * wordSize));
__ addptr(rsp, return_off * wordSize); // Epilog!
__ pop(rdx); // Exception pc
__ push(rax);
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
__ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
__ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
#endif
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
__ pop(rcx);
__ jmp (rcx);
masm->flush();
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize);
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/runtime_x86_64.cpp
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_x86.inline.hpp"
#endif
C:\hotspot-69087d08d473\src\cpu\x86\vm/sharedRuntime_x86_32.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#define __ masm->
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
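// RegisterSaver captures the complete CPU state (x87 FPU state and stack, XMM
// registers, eflags, and the integer registers pushed by pusha) in a frame
// layout that safepoint and deoptimization blobs can describe with an OopMap.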
class RegisterSaver {
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
enum layout {
fpu_state_off = 0,
fpu_state_end = fpu_state_off+FPUStateSizeInWords,
st0_off, st0H_off,
st1_off, st1H_off,
st2_off, st2H_off,
st3_off, st3H_off,
st4_off, st4H_off,
st5_off, st5H_off,
st6_off, st6H_off,
st7_off, st7H_off,
xmm_off,
DEF_XMM_OFFS(0),
DEF_XMM_OFFS(1),
DEF_XMM_OFFS(2),
DEF_XMM_OFFS(3),
DEF_XMM_OFFS(4),
DEF_XMM_OFFS(5),
DEF_XMM_OFFS(6),
DEF_XMM_OFFS(7),
flags_off = xmm7_off + 16/BytesPerInt + 1, // 16-byte stack alignment fill word
rdi_off,
rsi_off,
ignore_off, // extra copy of rbp,
rsp_off,
rbx_off,
rdx_off,
rcx_off,
rax_off,
rbp_off,
return_off, // slot for return address
reg_save_size };
enum { FPU_regs_live = flags_off - fpu_state_end };
public:
static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
int* total_frame_words, bool verify_fpu = true, bool save_vectors = false);
static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
static int rax_offset() { return rax_off; }
static int rbx_offset() { return rbx_off; }
static int raxOffset(void) { return rax_off; }
static int rdxOffset(void) { return rdx_off; }
static int rbxOffset(void) { return rbx_off; }
static int xmm0Offset(void) { return xmm0_off; }
static int fpResultOffset(void) { return st0_off; }
static void restore_result_registers(MacroAssembler* masm);
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
int* total_frame_words, bool verify_fpu, bool save_vectors) {
int vect_words = 0;
#ifdef COMPILER2
if (save_vectors) {
assert(UseAVX > 0, "256bit vectors are supported only with AVX");
assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
vect_words = 8 * 16 / wordSize;
additional_frame_words += vect_words;
}
#else
assert(!save_vectors, "vectors are generated only by C2");
#endif
int frame_size_in_bytes = (reg_save_size + additional_frame_words) * wordSize;
int frame_words = frame_size_in_bytes / wordSize;
*total_frame_words = frame_words; // report the frame size back to the caller
assert(FPUStateSizeInWords == 27, "update stack layout");
__ enter();
__ pusha();
__ pushf();
__ subptr(rsp,FPU_regs_live*wordSize); // Push FPU registers space
__ push_FPU_state(); // Save FPU state & init
if (verify_fpu) {
#ifdef ASSERT
Label ok;
__ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
__ jccb(Assembler::equal, ok);
__ stop("corrupted control word detected");
__ bind(ok);
#endif
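// Reset the on-stack copy of the control word so exceptions stay masked; the fstp_d stores below could otherwise raise FPU stack-underflow exceptions.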
__ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
}
__ frstor(Address(rsp, 0));
if (!verify_fpu) {
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
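// Save the FPU registers in de-opt-able form: pop each st slot out to its dedicated save area.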
__ fstp_d(Address(rsp, st0_off*wordSize)); // st(0)
__ fstp_d(Address(rsp, st1_off*wordSize)); // st(1)
__ fstp_d(Address(rsp, st2_off*wordSize)); // st(2)
__ fstp_d(Address(rsp, st3_off*wordSize)); // st(3)
__ fstp_d(Address(rsp, st4_off*wordSize)); // st(4)
__ fstp_d(Address(rsp, st5_off*wordSize)); // st(5)
__ fstp_d(Address(rsp, st6_off*wordSize)); // st(6)
__ fstp_d(Address(rsp, st7_off*wordSize)); // st(7)
if( UseSSE == 1 ) { // Save the XMM state
__ movflt(Address(rsp,xmm0_off*wordSize),xmm0);
__ movflt(Address(rsp,xmm1_off*wordSize),xmm1);
__ movflt(Address(rsp,xmm2_off*wordSize),xmm2);
__ movflt(Address(rsp,xmm3_off*wordSize),xmm3);
__ movflt(Address(rsp,xmm4_off*wordSize),xmm4);
__ movflt(Address(rsp,xmm5_off*wordSize),xmm5);
__ movflt(Address(rsp,xmm6_off*wordSize),xmm6);
__ movflt(Address(rsp,xmm7_off*wordSize),xmm7);
} else if( UseSSE >= 2 ) {
__ movdqu(Address(rsp,xmm0_off*wordSize),xmm0);
__ movdqu(Address(rsp,xmm1_off*wordSize),xmm1);
__ movdqu(Address(rsp,xmm2_off*wordSize),xmm2);
__ movdqu(Address(rsp,xmm3_off*wordSize),xmm3);
__ movdqu(Address(rsp,xmm4_off*wordSize),xmm4);
__ movdqu(Address(rsp,xmm5_off*wordSize),xmm5);
__ movdqu(Address(rsp,xmm6_off*wordSize),xmm6);
__ movdqu(Address(rsp,xmm7_off*wordSize),xmm7);
}
if (vect_words > 0) {
assert(vect_words*wordSize == 128, "");
__ subptr(rsp, 128); // Save upper half of YMM registers
__ vextractf128h(Address(rsp, 0),xmm0);
__ vextractf128h(Address(rsp, 16),xmm1);
__ vextractf128h(Address(rsp, 32),xmm2);
__ vextractf128h(Address(rsp, 48),xmm3);
__ vextractf128h(Address(rsp, 64),xmm4);
__ vextractf128h(Address(rsp, 80),xmm5);
__ vextractf128h(Address(rsp, 96),xmm6);
__ vextractf128h(Address(rsp,112),xmm7);
}
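// Build the OopMap describing where each register was saved so the GC can locate and update any oops held in registers at the safepoint.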
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap( frame_words, 0 );
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
map->set_callee_saved(STACK_OFFSET( rax_off), rax->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rcx_off), rcx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rdx_off), rdx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rbx_off), rbx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rsi_off), rsi->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rdi_off), rdi->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st0_off), as_FloatRegister(0)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st1_off), as_FloatRegister(1)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st2_off), as_FloatRegister(2)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st3_off), as_FloatRegister(3)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st4_off), as_FloatRegister(4)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st5_off), as_FloatRegister(5)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st6_off), as_FloatRegister(6)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(st7_off), as_FloatRegister(7)->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm0_off), xmm0->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm1_off), xmm1->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm2_off), xmm2->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm3_off), xmm3->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm4_off), xmm4->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm5_off), xmm5->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm6_off), xmm6->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm7_off), xmm7->as_VMReg());
if (true) {
#define NEXTREG(x) (x)->as_VMReg()->next()
map->set_callee_saved(STACK_OFFSET(st0H_off), NEXTREG(as_FloatRegister(0)));
map->set_callee_saved(STACK_OFFSET(st1H_off), NEXTREG(as_FloatRegister(1)));
map->set_callee_saved(STACK_OFFSET(st2H_off), NEXTREG(as_FloatRegister(2)));
map->set_callee_saved(STACK_OFFSET(st3H_off), NEXTREG(as_FloatRegister(3)));
map->set_callee_saved(STACK_OFFSET(st4H_off), NEXTREG(as_FloatRegister(4)));
map->set_callee_saved(STACK_OFFSET(st5H_off), NEXTREG(as_FloatRegister(5)));
map->set_callee_saved(STACK_OFFSET(st6H_off), NEXTREG(as_FloatRegister(6)));
map->set_callee_saved(STACK_OFFSET(st7H_off), NEXTREG(as_FloatRegister(7)));
map->set_callee_saved(STACK_OFFSET(xmm0H_off), NEXTREG(xmm0));
map->set_callee_saved(STACK_OFFSET(xmm1H_off), NEXTREG(xmm1));
map->set_callee_saved(STACK_OFFSET(xmm2H_off), NEXTREG(xmm2));
map->set_callee_saved(STACK_OFFSET(xmm3H_off), NEXTREG(xmm3));
map->set_callee_saved(STACK_OFFSET(xmm4H_off), NEXTREG(xmm4));
map->set_callee_saved(STACK_OFFSET(xmm5H_off), NEXTREG(xmm5));
map->set_callee_saved(STACK_OFFSET(xmm6H_off), NEXTREG(xmm6));
map->set_callee_saved(STACK_OFFSET(xmm7H_off), NEXTREG(xmm7));
#undef NEXTREG
#undef STACK_OFFSET
}
return map;
}
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
int additional_frame_bytes = 0;
#ifdef COMPILER2
if (restore_vectors) {
assert(UseAVX > 0, "256bit vectors are supported only with AVX");
assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
additional_frame_bytes = 128;
}
#else
assert(!restore_vectors, "vectors are generated only by C2");
#endif
if (UseSSE == 1) {
assert(additional_frame_bytes == 0, "");
__ movflt(xmm0,Address(rsp,xmm0_off*wordSize));
__ movflt(xmm1,Address(rsp,xmm1_off*wordSize));
__ movflt(xmm2,Address(rsp,xmm2_off*wordSize));
__ movflt(xmm3,Address(rsp,xmm3_off*wordSize));
__ movflt(xmm4,Address(rsp,xmm4_off*wordSize));
__ movflt(xmm5,Address(rsp,xmm5_off*wordSize));
__ movflt(xmm6,Address(rsp,xmm6_off*wordSize));
__ movflt(xmm7,Address(rsp,xmm7_off*wordSize));
} else if (UseSSE >= 2) {
#define STACK_ADDRESS(x) Address(rsp,(x)*wordSize + additional_frame_bytes)
__ movdqu(xmm0,STACK_ADDRESS(xmm0_off));
__ movdqu(xmm1,STACK_ADDRESS(xmm1_off));
__ movdqu(xmm2,STACK_ADDRESS(xmm2_off));
__ movdqu(xmm3,STACK_ADDRESS(xmm3_off));
__ movdqu(xmm4,STACK_ADDRESS(xmm4_off));
__ movdqu(xmm5,STACK_ADDRESS(xmm5_off));
__ movdqu(xmm6,STACK_ADDRESS(xmm6_off));
__ movdqu(xmm7,STACK_ADDRESS(xmm7_off));
#undef STACK_ADDRESS
}
if (restore_vectors) {
assert(additional_frame_bytes == 128, "");
__ vinsertf128h(xmm0, Address(rsp, 0));
__ vinsertf128h(xmm1, Address(rsp, 16));
__ vinsertf128h(xmm2, Address(rsp, 32));
__ vinsertf128h(xmm3, Address(rsp, 48));
__ vinsertf128h(xmm4, Address(rsp, 64));
__ vinsertf128h(xmm5, Address(rsp, 80));
__ vinsertf128h(xmm6, Address(rsp, 96));
__ vinsertf128h(xmm7, Address(rsp,112));
__ addptr(rsp, additional_frame_bytes);
}
__ pop_FPU_state();
__ addptr(rsp, FPU_regs_live*wordSize); // Pop FPU registers
__ popf();
__ popa();
__ pop(rbp);
}
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
__ frstor(Address(rsp, 0)); // Restore fpu state
if( UseSSE == 1 ) {
__ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
} else if( UseSSE >= 2 ) {
__ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
}
__ movptr(rax, Address(rsp, rax_off*wordSize));
__ movptr(rdx, Address(rsp, rdx_off*wordSize));
__ addptr(rsp, return_off * wordSize);
}
bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}
static int reg2offset_in(VMReg r) {
return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}
static int reg2offset_out(VMReg r) {
return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed,
int is_outgoing) {
uint stack = 0; // Starting stack position for args on stack
uint reg_arg0 = 9999;
uint reg_arg1 = 9999;
enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
uint fargs = (UseSSE>=2) ? 2 : UseSSE;
uint freg_arg0 = 9999+fargs;
uint freg_arg1 = 9999+fargs;
int i;
for( i = 0; i < total_args_passed; i++) {
if( sig_bt[i] == T_DOUBLE ) {
if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
else // Else double is passed low on the stack to be aligned.
stack += 2;
} else if( sig_bt[i] == T_LONG ) {
stack += 2;
}
}
int dstack = 0; // Separate counter for placing doubles
for( i = 0; i < total_args_passed; i++) {
switch( sig_bt[i] ) {
case T_SHORT:
case T_CHAR:
case T_BYTE:
case T_BOOLEAN:
case T_INT:
case T_ARRAY:
case T_OBJECT:
case T_ADDRESS:
if( reg_arg0 == 9999 ) {
reg_arg0 = i;
regs[i].set1(rcx->as_VMReg());
} else if( reg_arg1 == 9999 ) {
reg_arg1 = i;
regs[i].set1(rdx->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stack++));
}
break;
case T_FLOAT:
if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
freg_arg0 = i;
regs[i].set1(xmm0->as_VMReg());
} else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
freg_arg1 = i;
regs[i].set1(xmm1->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stack++));
}
break;
case T_LONG:
assert(sig_bt[i+1] == T_VOID, "missing Half" );
regs[i].set2(VMRegImpl::stack2reg(dstack));
dstack += 2;
break;
case T_DOUBLE:
assert(sig_bt[i+1] == T_VOID, "missing Half" );
if( freg_arg0 == (uint)i ) {
regs[i].set2(xmm0->as_VMReg());
} else if( freg_arg1 == (uint)i ) {
regs[i].set2(xmm1->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(dstack));
dstack += 2;
}
break;
case T_VOID: regs[i].set_bad(); break;
default:
ShouldNotReachHere();
break;
}
}
return round_to(stack, 2);
}
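// If the callee (rbx) has compiled code (Method::code() is set), call into the VM to patch the caller's call site to invoke the compiled entry directly next time.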
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ movptr(rax, Address(rsp, 0));
__ pusha();
__ pushf();
if (UseSSE == 1) {
__ subptr(rsp, 2*wordSize);
__ movflt(Address(rsp, 0), xmm0);
__ movflt(Address(rsp, wordSize), xmm1);
}
if (UseSSE >= 2) {
__ subptr(rsp, 4*wordSize);
__ movdbl(Address(rsp, 0), xmm0);
__ movdbl(Address(rsp, 2*wordSize), xmm1);
}
#ifdef COMPILER2
if (UseSSE >= 2) {
__ verify_FPU(0, "c2i transition should have clean FPU stack");
} else {
__ empty_FPU_stack();
}
#endif /* COMPILER2 */
__ push(rax);
__ push(rbx);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
__ addptr(rsp, 2*wordSize);
if (UseSSE == 1) {
__ movflt(xmm0, Address(rsp, 0));
__ movflt(xmm1, Address(rsp, wordSize));
__ addptr(rsp, 2*wordSize);
}
if (UseSSE >= 2) {
__ movdbl(xmm0, Address(rsp, 0));
__ movdbl(xmm1, Address(rsp, 2*wordSize));
__ addptr(rsp, 4*wordSize);
}
__ popf();
__ popa();
__ bind(L);
}
static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
int next_off = st_off - Interpreter::stackElementSize;
__ movdbl(Address(rsp, next_off), r);
}
static void gen_c2i_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
Label& skip_fixup) {
patch_callers_callsite(masm);
__ bind(skip_fixup);
#ifdef COMPILER2
if (UseSSE >= 2) {
__ verify_FPU(0, "c2i transition should have clean FPU stack");
} else {
__ empty_FPU_stack();
}
#endif /* COMPILER2 */
int extraspace = total_args_passed * Interpreter::stackElementSize;
__ pop(rax);
__ movptr(rsi, rsp);
__ subptr(rsp, extraspace);
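// rax now holds the popped return address and rsi the caller's SP; copy each argument from its compiled location into the interpreter's expected stack slot.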
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
continue;
}
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
continue;
}
if (r_1->is_stack()) {
int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
if (!r_2->is_valid()) {
__ movl(rdi, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rdi);
} else {
__ movptr(rdi, Address(rsp, ld_off));
__ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
__ movptr(rdi, Address(rsp, ld_off + wordSize));
__ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
__ mov64(rax, CONST64(0xdeadffffdeadaaaa));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
}
} else if (r_1->is_Register()) {
Register r = r_1->as_Register();
if (!r_2->is_valid()) {
__ movl(Address(rsp, st_off), r);
} else {
NOT_LP64(ShouldNotReachHere());
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifdef ASSERT
LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movptr(Address(rsp, next_off), r);
} else {
__ movptr(Address(rsp, st_off), r);
}
}
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
} else {
assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
}
}
}
__ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
__ push(rax);
__ jmp(rcx);
}
static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
int next_val_off = ld_off - Interpreter::stackElementSize;
__ movdbl(r, Address(saved_sp, next_val_off));
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
static void gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
__ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, rdi,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, rdi,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}
__ movptr(rdi, rsp);
int comp_words_on_stack = 0;
if (comp_args_on_stack) {
comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
comp_words_on_stack = round_to(comp_words_on_stack, 2);
__ subptr(rsp, comp_words_on_stack * wordSize);
}
__ andptr(rsp, -(StackAlignmentInBytes));
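// Compiled code requires an aligned SP; the return address captured in rax is pushed back after alignment so it ends up where the compiled callee expects it.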
__ push(rax);
const Register saved_sp = rax;
__ movptr(saved_sp, rdi);
__ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
continue;
}
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
int next_off = ld_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
continue;
}
if (r_1->is_stack()) {
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
if (!r_2->is_valid()) {
__ movl(rsi, Address(saved_sp, ld_off));
__ movptr(Address(rsp, st_off), rsi);
} else {
const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
__ movptr(rsi, Address(saved_sp, offset));
__ movptr(Address(rsp, st_off), rsi);
#ifndef _LP64
__ movptr(rsi, Address(saved_sp, ld_off));
__ movptr(Address(rsp, st_off + wordSize), rsi);
#endif // _LP64
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
assert(r != rax, "must be different");
if (r_2->is_valid()) {
const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
__ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
assert(r_2->as_Register() != rax, "need another temporary register");
__ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
} else {
__ movl(r, Address(saved_sp, ld_off));
}
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
} else {
move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
}
}
}
__ get_thread(rax);
__ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
__ mov(rax, rbx);
__ jmp(rdi);
}
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Register holder = rax;
Register receiver = rcx;
Register temp = rbx;
{
Label missed;
__ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ jcc(Assembler::notEqual, missed);
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ bind(missed);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
address c2i_entry = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on x86");
uint stack = 0; // All arguments on stack
for( int i = 0; i < total_args_passed; i++) {
switch( sig_bt[i] ) {
case T_BOOLEAN:
case T_CHAR:
case T_FLOAT:
case T_BYTE:
case T_SHORT:
case T_INT:
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS:
case T_METADATA:
regs[i].set1(VMRegImpl::stack2reg(stack++));
break;
case T_LONG:
case T_DOUBLE: // The stack numbering is reversed from Java
assert(sig_bt[i+1] == T_VOID, "missing Half" );
regs[i].set2(VMRegImpl::stack2reg(stack));
stack += 2;
break;
case T_VOID: regs[i].set_bad(); break;
default:
ShouldNotReachHere();
break;
}
}
return stack;
}
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
__ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
__ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
if (dst.first() != src.first()) {
__ mov(dst.first()->as_Register(), src.first()->as_Register());
}
}
}
static void object_move(MacroAssembler* masm,
OopMap* map,
int oop_handle_offset,
int framesize_in_slots,
VMRegPair src,
VMRegPair dst,
bool is_receiver,
int* receiver_offset) {
assert(dst.first()->is_stack(), "must be stack");
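// Oop arguments are passed to the native as JNI handles: the handle is the address of a stack slot holding the oop, or NULL when the oop itself is NULL.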
if (src.first()->is_stack()) {
Register rHandle = rax;
Label nil;
__ xorptr(rHandle, rHandle);
__ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, nil);
__ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
__ bind(nil);
__ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
if (is_receiver) {
*receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
}
} else {
const Register rOop = src.first()->as_Register();
const Register rHandle = rax;
int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
int offset = oop_slot*VMRegImpl::stack_slot_size;
Label skip;
__ movptr(Address(rsp, offset), rOop);
map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ xorptr(rHandle, rHandle);
__ cmpptr(rOop, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip);
__ lea(rHandle, Address(rsp, offset));
__ bind(skip);
__ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
if (is_receiver) {
*receiver_offset = offset;
}
}
}
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");
if (src.first()->is_stack()) {
__ movl(rax, Address(rbp, reg2offset_in(src.first())));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
__ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
}
}
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack() && dst.first()->is_stack()) {
assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
__ movptr(rax, Address(rbp, reg2offset_in(src.first())));
NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
} else {
ShouldNotReachHere();
}
}
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");
if (src.first()->is_stack()) {
__ movptr(rax, Address(rbp, reg2offset_in(src.first())));
NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
} else {
__ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
}
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
switch (ret_type) {
case T_FLOAT:
__ fstp_s(Address(rbp, -wordSize));
break;
case T_DOUBLE:
__ fstp_d(Address(rbp, -2*wordSize));
break;
case T_VOID: break;
case T_LONG:
__ movptr(Address(rbp, -wordSize), rax);
NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
break;
default: {
__ movptr(Address(rbp, -wordSize), rax);
}
}
}
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
switch (ret_type) {
case T_FLOAT:
__ fld_s(Address(rbp, -wordSize));
break;
case T_DOUBLE:
__ fld_d(Address(rbp, -2*wordSize));
break;
case T_LONG:
__ movptr(rax, Address(rbp, -wordSize));
NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
break;
case T_VOID: break;
default: {
__ movptr(rax, Address(rbp, -wordSize));
}
}
}
static void save_or_restore_arguments(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMap* map,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
int handle_index = 0;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
if (in_regs[i].second()->is_Register()) {
__ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
}
} else {
__ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
if (in_regs[i].second()->is_Register()) {
__ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
}
}
}
}
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
map->set_oop(VMRegImpl::stack2reg(slot));
}
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
if (map != NULL) {
__ movptr(Address(rsp, offset), reg);
} else {
__ movptr(reg, Address(rsp, offset));
}
break;
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (map != NULL) {
__ movl(Address(rsp, offset), reg);
} else {
__ movl(reg, Address(rsp, offset));
}
break;
case T_OBJECT:
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
if (in_sig_bt[i] == T_FLOAT) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
} else if (in_regs[i].first()->is_stack()) {
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
}
}
}
}
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
Register thread,
int stack_slots,
int total_c_args,
int total_in_args,
int arg_save_area,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
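// If a GC is pending (GC_locker::needs_gc()), save the incoming arguments, call into the VM to block until the critical region may be entered, then reload the arguments.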
__ block_comment("check GC_locker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
address the_pc = __ pc();
oop_maps->add_gc_map( __ offset(), map);
__ set_last_Java_frame(thread, rsp, noreg, the_pc);
__ block_comment("block_for_jni_critical");
__ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
__ increment(rsp, wordSize);
__ get_thread(thread);
__ reset_last_Java_frame(thread, false);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
for (int i = 0; i < total_in_args - 1; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
__ xorptr(reg, reg);
} else if (in_regs[i].first()->is_XMMRegister()) {
__ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
} else if (in_regs[i].first()->is_stack()) {
} else {
ShouldNotReachHere();
}
if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
i++;
}
}
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
}
#endif
}
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
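// A critical native receives an array argument as a raw (length, body pointer) pair; a NULL array is passed as length 0 and a NULL body pointer.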
Register tmp_reg = rax;
assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
"possible collision");
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");
Label is_null, done;
VMRegPair tmp(tmp_reg->as_VMReg());
if (reg.first()->is_stack()) {
simple_move32(masm, reg, tmp);
reg = tmp;
}
__ testptr(reg.first()->as_Register(), reg.first()->as_Register());
__ jccb(Assembler::equal, is_null);
__ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
simple_move32(masm, tmp, body_arg);
__ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
arrayOopDesc::base_offset_in_bytes(in_elem_type)));
simple_move32(masm, tmp, length_arg);
__ jmpb(done);
__ bind(is_null);
__ xorptr(tmp_reg, tmp_reg);
simple_move32(masm, tmp, body_arg);
simple_move32(masm, tmp, length_arg);
__ bind(done);
}
static void verify_oop_args(MacroAssembler* masm,
methodHandle method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < method->size_of_parameters(); i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
static void gen_special_dispatch(MacroAssembler* masm,
methodHandle method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
vmIntrinsics::ID iid = method->intrinsic_id();
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
if (ref_kind != 0) {
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (iid == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
fatal(err_msg_res("unexpected intrinsic id %d", iid));
}
if (member_reg != noreg) {
SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
VMReg r = regs[member_arg_pos].first();
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
member_reg = r->as_Register();
}
}
if (has_receiver) {
assert(method->size_of_parameters() > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
fatal("receiver always in a register");
receiver_reg = rcx; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
receiver_reg = r->as_Register();
}
}
MethodHandles::generate_method_handle_dispatch(masm, iid,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
methodHandle method,
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
int vep_offset = ((intptr_t)__ pc()) - start;
gen_special_dispatch(masm,
method,
in_sig_bt,
in_regs);
int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush();
int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
native_func = method->native_function();
is_critical_native = false;
}
assert(native_func != NULL, "must have function");
OopMapSet *oop_maps = new OopMapSet();
const int total_in_args = method->size_of_parameters();
int total_c_args = total_in_args;
if (!is_critical_native) {
total_c_args += 1;
if (method->is_static()) {
total_c_args++;
}
} else {
for (int i = 0; i < total_in_args; i++) {
if (in_sig_bt[i] == T_ARRAY) {
total_c_args++;
}
}
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
int argc = 0;
if (!is_critical_native) {
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
}
} else {
Thread* THREAD = Thread::current();
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
SignatureStream ss(method->signature());
for (int i = 0; i < total_in_args ; i++ ) {
if (in_sig_bt[i] == T_ARRAY) {
out_sig_bt[argc++] = T_INT;
out_sig_bt[argc++] = T_ADDRESS;
Symbol* atype = ss.as_symbol(CHECK_NULL);
const char* at = atype->as_C_string();
if (strlen(at) == 2) {
assert(at[0] == '[', "must be");
switch (at[1]) {
case 'B': in_elem_bt[i] = T_BYTE; break;
case 'C': in_elem_bt[i] = T_CHAR; break;
case 'D': in_elem_bt[i] = T_DOUBLE; break;
case 'F': in_elem_bt[i] = T_FLOAT; break;
case 'I': in_elem_bt[i] = T_INT; break;
case 'J': in_elem_bt[i] = T_LONG; break;
case 'S': in_elem_bt[i] = T_SHORT; break;
case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
default: ShouldNotReachHere();
}
}
} else {
out_sig_bt[argc++] = in_sig_bt[i];
in_elem_bt[i] = T_VOID;
}
if (in_sig_bt[i] != T_VOID) {
assert(in_sig_bt[i] == ss.type(), "must match");
ss.next();
}
}
}
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
if (is_critical_native) {
int double_slots = 0;
int single_slots = 0;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY: // critical array (uses 2 slots on LP64)
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT: single_slots++; break;
case T_LONG: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
switch (in_sig_bt[i]) {
case T_FLOAT: single_slots++; break;
case T_DOUBLE: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
}
}
total_save_slots = double_slots * 2 + single_slots;
if (double_slots != 0) {
stack_slots = round_to(stack_slots, 2);
}
}
int oop_handle_offset = stack_slots;
stack_slots += total_save_slots;
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
bool is_static = false;
if (method->is_static()) {
klass_slot_offset = stack_slots;
stack_slots += VMRegImpl::slots_per_word;
klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
is_static = true;
}
if (method->is_synchronized()) {
lock_slot_offset = stack_slots;
stack_slots += VMRegImpl::slots_per_word;
}
stack_slots += 4;
stack_slots = round_to(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
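// Frame layout from low to high addresses: outgoing C arguments, the oop-handle/argument save area, an optional mirror slot for static methods, an optional BasicLock slot, and scratch space for return values.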
intptr_t start = (intptr_t)__ pc();
const Register ic_reg = rax;
const Register receiver = rcx;
Label hit;
Label exception_pending;
__ verify_oop(receiver);
__ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ align(8);
__ bind(hit);
int vep_offset = ((intptr_t)__ pc()) - start;
#ifdef COMPILER1
if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
Label slowCase;
Register receiver = rcx;
Register result = rax;
__ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
__ testptr(result, markOopDesc::unlocked_value);
__ jcc (Assembler::zero, slowCase);
if (UseBiasedLocking) {
__ testptr(result, markOopDesc::biased_lock_bit_in_place);
__ jcc (Assembler::notZero, slowCase);
}
__ andptr(result, markOopDesc::hash_mask_in_place);
__ jcc (Assembler::zero, slowCase);
__ shrptr(result, markOopDesc::hash_shift);
__ ret(0);
__ bind (slowCase);
}
#endif // COMPILER1
if (UseStackBanging) {
__ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
} else {
__ fat_nop();
}
__ enter();
__ subptr(rsp, stack_size - 2*wordSize);
int frame_complete = ((intptr_t)__ pc()) - start;
if (UseRTMLocking) {
__ xabort(0);
}
int fp_adjustment = stack_size - 2*wordSize;
#ifdef COMPILER2
if (UseSSE >= 2) {
__ verify_FPU(0, "c2i transition should have clean FPU stack");
} else {
__ empty_FPU_stack();
}
#endif /* COMPILER2 */
int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
const Register thread = rdi;
const Register oop_handle_reg = rsi;
__ get_thread(thread);
if (is_critical_native) {
check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
int receiver_offset = -1;
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
break;
}
case T_OBJECT:
assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
break;
case T_VOID:
break;
case T_FLOAT:
float_move(masm, in_regs[i], out_regs[c_arg]);
break;
case T_DOUBLE:
assert( i + 1 < total_in_args &&
in_sig_bt[i + 1] == T_VOID &&
out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
double_move(masm, in_regs[i], out_regs[c_arg]);
break;
case T_LONG :
long_move(masm, in_regs[i], out_regs[c_arg]);
break;
case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
default:
simple_move32(masm, in_regs[i], out_regs[c_arg]);
}
}
if (method->is_static() && !is_critical_native) {
__ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
__ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
__ lea(oop_handle_reg, Address(rsp, klass_offset));
__ movptr(Address(rsp, wordSize), oop_handle_reg);
}
intptr_t the_pc = (intptr_t) __ pc();
oop_maps->add_gc_map(the_pc - start, map);
__ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
{
SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
__ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
thread, rax);
}
if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
__ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
thread, rax);
}
const Register swap_reg = rax; // Must use rax, for cmpxchg instruction
const Register obj_reg = rcx; // Will contain the oop
const Register lock_reg = rdx; // Address of compiler lock object (BasicLock)
Label slow_path_lock;
Label lock_done;
if (method->is_synchronized()) {
assert(!is_critical_native, "unhandled");
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
__ movptr(oop_handle_reg, Address(rsp, wordSize));
__ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (UseBiasedLocking) {
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
}
__ movptr(swap_reg, 1);
__ orptr(swap_reg, Address(obj_reg, 0));
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
if (os::is_MP()) {
__ lock();
}
__ cmpxchgptr(lock_reg, Address(obj_reg, 0));
__ jcc(Assembler::equal, lock_done);
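// CAS failed: test whether the mark is an obvious stack pointer in our own frame,
// i.e. (mark & 3) == 0 and rsp <= mark < rsp + page_size, evaluated in one step
// as ((mark - rsp) & (3 - os::vm_page_size())) == 0 (the recursive-lock case).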
__ subptr(swap_reg, rsp);
__ andptr(swap_reg, 3 - os::vm_page_size());
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
__ jcc(Assembler::notEqual, slow_path_lock);
__ bind(lock_done);
if (UseBiasedLocking) {
__ movptr(oop_handle_reg, Address(rsp, wordSize));
}
}
if (!is_critical_native) {
__ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
__ movptr(Address(rsp, 0), rdx);
}
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
__ call(RuntimeAddress(native_func));
__ restore_cpu_control_state_after_jni();
switch (ret_type) {
case T_BOOLEAN: __ c2bool(rax); break;
case T_CHAR : __ andptr(rax, 0xFFFF); break;
case T_BYTE : __ sign_extend_byte (rax); break;
case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break;
case T_DOUBLE :
case T_FLOAT :
break;
case T_ARRAY: // Really a handle
case T_OBJECT: // Really a handle
break; // can't de-handlize until after safepoint check
case T_VOID: break;
case T_LONG: break;
default : ShouldNotReachHere();
}
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
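// Make the state change visible to the VM thread before the safepoint check below; this needs a StoreLoad barrier (or a serializing write when membars are not used).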
if(os::is_MP()) {
if (UseMembar) {
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else {
__ serialize_memory(thread, rcx);
}
}
if (AlwaysRestoreFPU) {
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
Label after_transition;
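// Safepoint poll: if a safepoint is in progress or this thread has suspend flags set, call into the VM to block before completing the transition back to Java.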
{ Label Continue;
__ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
Label L;
__ jcc(Assembler::notEqual, L);
__ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue);
__ bind(L);
save_native_result(masm, ret_type, stack_slots);
__ push(thread);
if (!is_critical_native) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans)));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans_and_transition)));
}
__ increment(rsp, wordSize);
restore_native_result(masm, ret_type, stack_slots);
if (is_critical_native) {
__ jmpb(after_transition);
}
__ bind(Continue);
}
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ bind(after_transition);
Label reguard;
Label reguard_done;
__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::equal, reguard);
__ bind(reguard_done);
Label slow_path_unlock;
Label unlock_done;
if (method->is_synchronized()) {
Label done;
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (UseBiasedLocking) {
__ biased_locking_exit(obj_reg, rbx, done);
}
__ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
save_native_result(masm, ret_type, stack_slots);
}
__ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
__ lea(rax, Address(rbp, lock_slot_rbp_offset));
if (os::is_MP()) {
__ lock();
}
__ cmpxchgptr(rbx, Address(obj_reg, 0));
__ jcc(Assembler::notEqual, slow_path_unlock);
__ bind(unlock_done);
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
restore_native_result(masm, ret_type, stack_slots);
}
__ bind(done);
}
{
SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
save_native_result(masm, ret_type, stack_slots);
__ mov_metadata(rax, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
thread, rax);
restore_native_result(masm, ret_type, stack_slots);
}
__ reset_last_Java_frame(thread, false);
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
__ resolve_jobject(rax /* value */,
thread /* thread */,
rcx /* tmp */);
}
if (!is_critical_native) {
__ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
__ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
}
__ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
if (ret_type == T_FLOAT) {
if (UseSSE >= 1) {
__ fstp_s(Address(rbp, -4));
__ movflt(xmm0, Address(rbp, -4));
}
} else if (ret_type == T_DOUBLE) {
if (UseSSE >= 2) {
__ fstp_d(Address(rbp, -8));
__ movdbl(xmm0, Address(rbp, -8));
}
}
__ leave();
__ ret(0);
if (method->is_synchronized()) {
__ bind(slow_path_lock);
__ push(thread);
__ push(lock_reg);
__ push(obj_reg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
__ addptr(rsp, 3*wordSize);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit from monitorenter");
__ bind(L);
}
#endif
__ jmp(lock_done);
__ bind(slow_path_unlock);
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
save_native_result(masm, ret_type, stack_slots);
}
__ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
__ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
__ lea(rax, Address(rbp, lock_slot_rbp_offset));
__ push(rax);
__ push(obj_reg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
__ addptr(rsp, 2*wordSize);
#ifdef ASSERT
{
Label L;
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
__ bind(L);
}
#endif /* ASSERT */
__ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
restore_native_result(masm, ret_type, stack_slots);
}
__ jmp(unlock_done);
}
__ bind(reguard);
save_native_result(masm, ret_type, stack_slots);
{
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
}
restore_native_result(masm, ret_type, stack_slots);
__ jmp(reguard_done);
if (!is_critical_native) {
__ bind(exception_pending);
__ empty_FPU_stack();
__ leave();
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
}
__ flush();
nmethod *nm = nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
if (is_critical_native) {
nm->set_lazy_critical_native(true);
}
return nm;
}
#ifdef HAVE_DTRACE_H
nmethod *SharedRuntime::generate_dtrace_nmethod(
MacroAssembler *masm, methodHandle method) {
assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
int total_args_passed = method->size_of_parameters();
BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
int i=0;
int total_strings = 0;
int first_arg_to_pass = 0;
int total_c_args = 0;
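// Translate the Java signature into the C probe signature: String arguments become utf8 string pointers (T_ADDRESS), small box types unbox to T_INT, and Long/Double boxes unbox to T_LONG.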
if( !method->is_static() ) { // Pass in receiver first
in_sig_bt[i++] = T_OBJECT;
first_arg_to_pass = 1;
}
SignatureStream ss(method->signature());
for ( ; !ss.at_return_type(); ss.next()) {
BasicType bt = ss.type();
in_sig_bt[i++] = bt; // Collect remaining bits of signature
out_sig_bt[total_c_args++] = bt;
if( bt == T_OBJECT) {
Symbol* s = ss.as_symbol_or_null(); // symbol is created
if (s == vmSymbols::java_lang_String()) {
total_strings++;
out_sig_bt[total_c_args-1] = T_ADDRESS;
} else if (s == vmSymbols::java_lang_Boolean() ||
s == vmSymbols::java_lang_Character() ||
s == vmSymbols::java_lang_Byte() ||
s == vmSymbols::java_lang_Short() ||
s == vmSymbols::java_lang_Integer() ||
s == vmSymbols::java_lang_Float()) {
out_sig_bt[total_c_args-1] = T_INT;
} else if (s == vmSymbols::java_lang_Long() ||
s == vmSymbols::java_lang_Double()) {
out_sig_bt[total_c_args-1] = T_LONG;
out_sig_bt[total_c_args++] = T_VOID;
}
} else if ( bt == T_LONG || bt == T_DOUBLE ) {
in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
out_sig_bt[total_c_args++] = T_VOID;
}
}
assert(i==total_args_passed, "validly parsed signature");
int comp_args_on_stack;
comp_args_on_stack = SharedRuntime::java_calling_convention(
in_sig_bt, in_regs, total_args_passed, false);
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1);
for (i = 0; i < total_strings ; i++) {
string_locs[i] = stack_slots;
stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
}
stack_slots += 2;
stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
intptr_t start = (intptr_t)__ pc();
const Register ic_reg = rax;
const Register receiver = rcx;
Label hit;
Label exception_pending;
__ verify_oop(receiver);
__ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ align(8);
__ bind(hit);
int vep_offset = ((intptr_t)__ pc()) - start;
if (UseStackBanging) {
if (stack_size <= StackShadowPages*os::vm_page_size()) {
__ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
} else {
__ movl(rax, stack_size);
__ bang_stack_size(rax, rbx);
}
} else {
__ fat_nop();
}
assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
"valid size for make_non_entrant");
__ enter();
if (stack_size - 2*wordSize != 0) {
__ subl(rsp, stack_size - 2*wordSize);
}
int frame_complete = ((intptr_t)__ pc()) - start;
int sid = 0;
int c_arg, j_arg;
int string_reg = 0;
for (j_arg = first_arg_to_pass, c_arg = 0 ;
j_arg < total_args_passed ; j_arg++, c_arg++ ) {
VMRegPair src = in_regs[j_arg];
VMRegPair dst = out_regs[c_arg];
assert(dst.first()->is_stack() || in_sig_bt[j_arg] == T_VOID,
"stack based abi assumed");
switch (in_sig_bt[j_arg]) {
case T_ARRAY:
case T_OBJECT:
if (out_sig_bt[c_arg] == T_ADDRESS) {
if (src.first()->is_reg()) {
if (string_reg++ != 0) {
simple_move32(masm, src, dst);
}
}
} else if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
Register in_reg = rax;
if ( src.first()->is_reg() ) {
in_reg = src.first()->as_Register();
} else {
simple_move32(masm, src, in_reg->as_VMReg());
}
Label skipUnbox;
__ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
if ( out_sig_bt[c_arg] == T_LONG ) {
__ movl(Address(rsp, reg2offset_out(dst.second())), NULL_WORD);
}
__ testl(in_reg, in_reg);
__ jcc(Assembler::zero, skipUnbox);
assert(dst.first()->is_stack() &&
(!dst.second()->is_valid() || dst.second()->is_stack()),
"value(s) must go into stack slots");
BasicType bt = out_sig_bt[c_arg];
int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
if ( bt == T_LONG ) {
__ movl(rbx, Address(in_reg,
box_offset + VMRegImpl::stack_slot_size));
__ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
}
__ movl(in_reg, Address(in_reg, box_offset));
__ movl(Address(rsp, reg2offset_out(dst.first())), in_reg);
__ bind(skipUnbox);
} else {
__ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
}
if (out_sig_bt[c_arg] == T_LONG) {
assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
++c_arg; // Move over the T_VOID To keep the loop indices in sync
}
break;
case T_VOID:
break;
case T_FLOAT:
float_move(masm, src, dst);
break;
case T_DOUBLE:
assert( j_arg + 1 < total_args_passed &&
in_sig_bt[j_arg + 1] == T_VOID, "bad arg list");
double_move(masm, src, dst);
break;
case T_LONG :
long_move(masm, src, dst);
break;
case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // fall through
default:
simple_move32(masm, src, dst);
}
}
for (sid = 0, j_arg = first_arg_to_pass, c_arg = 0 ;
sid < total_strings ; j_arg++, c_arg++ ) {
if (out_sig_bt[c_arg] == T_ADDRESS) {
Address utf8_addr = Address(
rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
__ leal(rax, utf8_addr);
VMReg orig_loc = in_regs[j_arg].first();
Register string_oop;
Address dest = Address(rsp, reg2offset_out(out_regs[c_arg].first()));
if (sid == 1 && orig_loc->is_reg()) {
string_oop = orig_loc->as_Register();
assert(string_oop != rax, "smashed arg");
} else {
if (orig_loc->is_reg()) {
__ movl(rcx, dest);
} else {
__ movl(rcx, Address(rbp, reg2offset_in(orig_loc)));
}
string_oop = rcx;
}
Label nullString;
__ movl(dest, NULL_WORD);
__ testl(string_oop, string_oop);
__ jcc(Assembler::zero, nullString);
__ movl(dest, rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(
address, SharedRuntime::get_utf), string_oop, rax);
__ bind(nullString);
}
if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
++c_arg; // Move over the T_VOID To keep the loop indices in sync
}
}
int patch_offset = ((intptr_t)__ pc()) - start;
__ nop();
__ leave();
__ ret(0);
__ flush();
nmethod *nm = nmethod::new_dtrace_nmethod(
method, masm->code(), vep_offset, patch_offset, frame_complete,
stack_slots / VMRegImpl::slots_per_word);
return nm;
}
#endif // HAVE_DTRACE_H
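// The dtrace wrapper above reserves, per java.lang.String argument, a fixed
// UTF-8 scratch buffer inside the frame and passes that buffer's address (or
// NULL for a null String) as the C argument. A minimal sketch of the slot
// budget only, with assumed parameter names (not VM constants):
static int sketch_dtrace_string_slots(int total_strings,
                                      int max_dtrace_string_size_,
                                      int stack_slot_size_) {
  // Mirrors the string_locs loop: one buffer of max_dtrace_string_size_
  // bytes, expressed in stack slots, per String argument.
  return total_strings * (max_dtrace_string_size_ / stack_slot_size_);
}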
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
}
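// A worked instance of the adjustment above, as a standalone sketch (the
// stackElementWords value is an assumption, 1 on this 32-bit port): a callee
// with 5 locals and 2 parameters needs (5 - 2) * 1 = 3 extra words, since
// parameters already occupy caller-provided slots and only the surplus
// locals need new stack space.
static int sketch_last_frame_adjust(int callee_parameters, int callee_locals) {
  const int stack_element_words = 1; // assumed Interpreter::stackElementWords
  return (callee_locals - callee_parameters) * stack_element_words;
}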
uint SharedRuntime::out_preserve_stack_slots() {
return 0;
}
void SharedRuntime::generate_deopt_blob() {
ResourceMark rm;
CodeBuffer buffer("deopt_blob", 1024, 1024);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words;
OopMap* map = NULL;
const int additional_words = 2; // deopt kind, thread
OopMapSet *oop_maps = new OopMapSet();
address start = __ pc();
Label cont;
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
__ push(Deoptimization::Unpack_deopt);
__ jmp(cont);
int reexecute_offset = __ pc() - start;
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
__ push(Deoptimization::Unpack_reexecute);
__ jmp(cont);
int exception_offset = __ pc() - start;
__ get_thread(rdi);
__ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
__ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
int exception_in_tls_offset = __ pc() - start;
__ push(0);
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
__ push(Deoptimization::Unpack_exception);
__ get_thread(rdi);
__ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rdx);
__ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
#ifdef ASSERT
__ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
__ verify_oop(rax);
Label no_pending_exception;
__ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
__ testptr(rax, rax);
__ jcc(Assembler::zero, no_pending_exception);
__ stop("must not have pending exception here");
__ bind(no_pending_exception);
#endif
__ bind(cont);
__ empty_FPU_stack();
__ get_thread(rcx);
__ push(rcx);
__ set_last_Java_frame(rcx, noreg, noreg, NULL);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
oop_maps->add_gc_map( __ pc()-start, map);
__ pop(rcx);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false);
__ mov(rdi, rax);
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
__ pop(rax);
__ movl(unpack_kind, rax); // save the unpack_kind value
Label noException;
__ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
__ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
__ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
__ verify_oop(rax);
__ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
__ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
__ bind(noException);
RegisterSaver::restore_result_registers(masm);
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
__ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
#ifdef ASSERT
if (UseStackBanging) {
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
__ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ pop(rsi); // trash the old pc
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(counter, rbx);
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
__ movptr(sp_temp, rsp);
__ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ subptr(rsp, rbx);
Label loop;
__ bind(loop);
__ movptr(rbx, Address(rsi, 0)); // Load frame size
#ifdef CC_INTERP
__ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
#ifdef ASSERT
__ push(0xDEADDEAD); // Make a recognizable pattern
__ push(0xDEADDEAD);
#else /* ASSERT */
__ subptr(rsp, 2*wordSize); // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
__ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
__ pushptr(Address(rcx, 0)); // save return address
__ enter(); // save old & set new rbp,
__ subptr(rsp, rbx); // Prolog!
__ movptr(rbx, sp_temp); // sender's sp
#ifdef CC_INTERP
__ movptr(Address(rbp,
-(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
rbx); // Make it walkable
#else /* CC_INTERP */
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
__ movptr(sp_temp, rsp); // pass to next frame
__ addptr(rsi, wordSize); // Bump array pointer (sizes)
__ addptr(rcx, wordSize); // Bump array pointer (pcs)
__ decrementl(counter); // decrement counter
__ jcc(Assembler::notZero, loop);
__ pushptr(Address(rcx, 0)); // save final return address
__ enter(); // save old & set new rbp,
__ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
__ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
__ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
__ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local
if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
__ pushl(unpack_kind); // get the unpack_kind value
__ get_thread(rcx);
__ push(rcx);
__ set_last_Java_frame(rcx, noreg, rbp, NULL);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
__ push(rax);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false);
__ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
__ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
__ empty_FPU_stack();
Label results_done, yes_double_value;
__ cmpl(Address(rsp, 0), T_DOUBLE);
__ jcc (Assembler::zero, yes_double_value);
__ cmpl(Address(rsp, 0), T_FLOAT);
__ jcc (Assembler::notZero, results_done);
if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
__ jmp(results_done);
__ bind(yes_double_value);
if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
else __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
__ bind(results_done);
__ leave(); // Epilog!
__ ret(0);
masm->flush();
_deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
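// The frame-pushing loop in generate_deopt_blob() walks two parallel arrays
// returned by fetch_unroll_info(): per-frame sizes and per-frame return pcs.
// A C-level sketch of the bookkeeping (hypothetical struct; the real layout
// lives in Deoptimization::UnrollBlock):
struct SketchUnrollBlock {
  int    number_of_frames; // loop trip count (the 'counter' temp above)
  long*  frame_sizes;      // bytes of frame opened per iteration (via rsi)
  void** frame_pcs;        // return pc pushed into each frame (via rcx)
};
static long sketch_total_unrolled_bytes(const SketchUnrollBlock* ub) {
  long total = 0;
  for (int i = 0; i < ub->number_of_frames; i++) {
    total += ub->frame_sizes[i]; // each entry covers the pushed pc/rbp plus
  }                              // the rsp drop for that interpreter frame
  return total;
}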
#ifdef COMPILER2
void SharedRuntime::generate_uncommon_trap_blob() {
ResourceMark rm;
CodeBuffer buffer("uncommon_trap_blob", 512, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
enum frame_layout {
arg0_off, // thread sp + 0 // Arg location for
arg1_off, // unloaded_class_index sp + 1 // calling C
rbp_off, // callee saved register sp + 2
return_off, // slot for return address sp + 3
framesize
};
address start = __ pc();
if (UseRTMLocking) {
__ xabort(0);
}
__ subptr(rsp, return_off*wordSize); // open self-frame: arg and rbp slots below the pushed return pc
__ movptr(Address(rsp, rbp_off*wordSize), rbp);
__ empty_FPU_stack();
__ get_thread(rdx);
__ set_last_Java_frame(rdx, noreg, noreg, NULL);
__ movptr(Address(rsp, arg0_off*wordSize), rdx);
__ movl(Address(rsp, arg1_off*wordSize),rcx);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap( framesize, 0 );
oop_maps->add_gc_map( __ pc()-start, map);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false);
__ movptr(rdi, rax);
__ addptr(rsp,(framesize-1)*wordSize); // Epilog!
__ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
__ addptr(rsp, rcx);
__ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
#ifdef ASSERT
if (UseStackBanging) {
__ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(rbx, rcx);
}
#endif
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
__ pop(rsi); // trash the pc
__ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
__ movl(counter, rbx);
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
__ movptr(sp_temp, rsp);
__ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
__ subptr(rsp, rbx);
Label loop;
__ bind(loop);
__ movptr(rbx, Address(rsi, 0)); // Load frame size
#ifdef CC_INTERP
__ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
#ifdef ASSERT
__ push(0xDEADDEAD); // Make a recognizable pattern
__ push(0xDEADDEAD); // (parm to RecursiveInterpreter...)
#else /* ASSERT */
__ subptr(rsp, 2*wordSize); // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
__ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
__ pushptr(Address(rcx, 0)); // save return address
__ enter(); // save old & set new rbp,
__ subptr(rsp, rbx); // Prolog!
__ movptr(rbx, sp_temp); // sender's sp
#ifdef CC_INTERP
__ movptr(Address(rbp,
-(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
rbx); // Make it walkable
#else /* CC_INTERP */
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
__ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
__ movptr(sp_temp, rsp); // pass to next frame
__ addptr(rsi, wordSize); // Bump array pointer (sizes)
__ addptr(rcx, wordSize); // Bump array pointer (pcs)
__ decrementl(counter); // decrement counter
__ jcc(Assembler::notZero, loop);
__ pushptr(Address(rcx, 0)); // save final return address
__ enter(); // save old & set new rbp,
__ subptr(rsp, (framesize-2) * wordSize); // Prolog!
__ get_thread(rdi);
__ set_last_Java_frame(rdi, noreg, rbp, NULL);
__ movptr(Address(rsp,arg0_off*wordSize),rdi);
__ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
__ get_thread(rdi);
__ reset_last_Java_frame(rdi, true);
__ leave(); // Epilog!
__ ret(0);
masm->flush();
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
const int additional_words = 1;
int frame_size_in_words;
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
ResourceMark rm;
OopMapSet *oop_maps = new OopMapSet();
OopMap* map;
CodeBuffer buffer("handler_blob", 1024, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
const Register java_thread = rdi; // callee-saved for VC++
address start = __ pc();
address call_pc = NULL;
bool cause_return = (poll_type == POLL_AT_RETURN);
bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
if (UseRTMLocking) {
__ xabort(0);
}
if (!cause_return)
__ push(rbx); // Make room for return address (or push it again)
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false, save_vectors);
__ get_thread(java_thread);
__ push(java_thread);
__ set_last_Java_frame(java_thread, noreg, noreg, NULL);
if (!cause_return) {
__ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
__ movptr(Address(rbp, wordSize), rax);
}
__ call(RuntimeAddress(call_ptr));
oop_maps->add_gc_map( __ pc() - start, map);
__ pop(rcx);
Label noException;
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, false);
__ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, noException);
RegisterSaver::restore_live_registers(masm, save_vectors);
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
__ bind(noException);
RegisterSaver::restore_live_registers(masm, save_vectors);
__ ret(0);
masm->flush();
return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
ResourceMark rm;
CodeBuffer buffer(name, 1000, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_words;
enum frame_layout {
thread_off,
extra_words };
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = NULL;
int start = __ offset();
map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);
int frame_complete = __ offset();
const Register thread = rdi;
__ get_thread(rdi);
__ push(thread);
__ set_last_Java_frame(thread, noreg, rbp, NULL);
__ call(RuntimeAddress(destination));
oop_maps->add_gc_map( __ offset() - start, map);
__ addptr(rsp, wordSize);
__ reset_last_Java_frame(thread, true);
Label pending;
__ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending);
__ get_vm_result_2(rbx, thread);
__ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
__ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
RegisterSaver::restore_live_registers(masm);
__ jmp(rax);
__ bind(pending);
RegisterSaver::restore_live_registers(masm);
__ get_thread(thread);
__ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
__ movptr(rax, Address(thread, Thread::pending_exception_offset()));
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
masm->flush();
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
// C:\hotspot-69087d08d473\src\cpu\x86\vm/sharedRuntime_x86_64.cpp
#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#define __ masm->
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
class SimpleRuntimeFrame {
public:
enum layout {
rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
rbp_off2,
return_off, return_off2,
framesize
};
};
class RegisterSaver {
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
enum layout {
fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
xmm_off = fpu_state_off + 160/BytesPerInt, // offset in fxsave save area
DEF_XMM_OFFS(0),
DEF_XMM_OFFS(1),
DEF_XMM_OFFS(2),
DEF_XMM_OFFS(3),
DEF_XMM_OFFS(4),
DEF_XMM_OFFS(5),
DEF_XMM_OFFS(6),
DEF_XMM_OFFS(7),
DEF_XMM_OFFS(8),
DEF_XMM_OFFS(9),
DEF_XMM_OFFS(10),
DEF_XMM_OFFS(11),
DEF_XMM_OFFS(12),
DEF_XMM_OFFS(13),
DEF_XMM_OFFS(14),
DEF_XMM_OFFS(15),
fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
fpu_stateH_end,
r15_off, r15H_off,
r14_off, r14H_off,
r13_off, r13H_off,
r12_off, r12H_off,
r11_off, r11H_off,
r10_off, r10H_off,
r9_off, r9H_off,
r8_off, r8H_off,
rdi_off, rdiH_off,
rsi_off, rsiH_off,
ignore_off, ignoreH_off, // extra copy of rbp
rsp_off, rspH_off,
rbx_off, rbxH_off,
rdx_off, rdxH_off,
rcx_off, rcxH_off,
rax_off, raxH_off,
align_off, alignH_off,
flags_off, flagsH_off,
rbp_off, rbpH_off, // copy of rbp we will restore
return_off, returnH_off, // slot for return address
reg_save_size // size in compiler stack slots
};
public:
static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
static void restore_result_registers(MacroAssembler* masm);
};
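// All the *_off enumerators above count 32-bit VMReg stack slots, which is
// why every byte accessor multiplies by BytesPerInt and why each 64-bit
// register occupies an _off / H_off pair of consecutive slots. Standalone
// sketch of the arithmetic (4-byte slot size assumed):
static int sketch_slot_to_byte_offset(int slot_index) {
  const int bytes_per_int = 4; // one VMReg stack slot
  return slot_index * bytes_per_int; // e.g. rax_off -> byte offset of rax
}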
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
int vect_words = 0;
#ifdef COMPILER2
if (save_vectors) {
assert(UseAVX > 0, "256bit vectors are supported only with AVX");
assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
vect_words = 16 * 16 / wordSize;
additional_frame_words += vect_words;
}
#else
assert(!save_vectors, "vectors are generated only by C2");
#endif
int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
reg_save_size*BytesPerInt, 16);
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
int frame_size_in_words = frame_size_in_bytes / wordSize;
__ enter(); // rsp becomes 16-byte aligned here
__ push_CPU_state(); // Push a multiple of 16 bytes
if (vect_words > 0) {
assert(vect_words*wordSize == 256, "");
__ subptr(rsp, 256); // Save upper half of YMM registes
__ vextractf128h(Address(rsp, 0),xmm0);
__ vextractf128h(Address(rsp, 16),xmm1);
__ vextractf128h(Address(rsp, 32),xmm2);
__ vextractf128h(Address(rsp, 48),xmm3);
__ vextractf128h(Address(rsp, 64),xmm4);
__ vextractf128h(Address(rsp, 80),xmm5);
__ vextractf128h(Address(rsp, 96),xmm6);
__ vextractf128h(Address(rsp,112),xmm7);
__ vextractf128h(Address(rsp,128),xmm8);
__ vextractf128h(Address(rsp,144),xmm9);
__ vextractf128h(Address(rsp,160),xmm10);
__ vextractf128h(Address(rsp,176),xmm11);
__ vextractf128h(Address(rsp,192),xmm12);
__ vextractf128h(Address(rsp,208),xmm13);
__ vextractf128h(Address(rsp,224),xmm14);
__ vextractf128h(Address(rsp,240),xmm15);
}
if (frame::arg_reg_save_area_bytes != 0) {
__ subptr(rsp, frame::arg_reg_save_area_bytes);
}
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap(frame_size_in_slots, 0);
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r8_off ), r8->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r9_off ), r9->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm0_off ), xmm0->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm1_off ), xmm1->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm2_off ), xmm2->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm3_off ), xmm3->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm4_off ), xmm4->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm5_off ), xmm5->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm6_off ), xmm6->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm7_off ), xmm7->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm8_off ), xmm8->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm9_off ), xmm9->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm10_off), xmm10->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm11_off), xmm11->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm12_off), xmm12->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm13_off), xmm13->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm14_off), xmm14->as_VMReg());
map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15->as_VMReg());
if (true) { // also record the high halves of the saved 64-bit registers
map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r8H_off ), r8->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r9H_off ), r9->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm0H_off ), xmm0->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm1H_off ), xmm1->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm2H_off ), xmm2->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm3H_off ), xmm3->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm4H_off ), xmm4->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm5H_off ), xmm5->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm6H_off ), xmm6->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm7H_off ), xmm7->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm8H_off ), xmm8->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next());
map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next());
}
return map;
}
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
if (frame::arg_reg_save_area_bytes != 0) {
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
#ifdef COMPILER2
if (restore_vectors) {
assert(UseAVX > 0, "256bit vectors are supported only with AVX");
assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
__ vinsertf128h(xmm0, Address(rsp, 0));
__ vinsertf128h(xmm1, Address(rsp, 16));
__ vinsertf128h(xmm2, Address(rsp, 32));
__ vinsertf128h(xmm3, Address(rsp, 48));
__ vinsertf128h(xmm4, Address(rsp, 64));
__ vinsertf128h(xmm5, Address(rsp, 80));
__ vinsertf128h(xmm6, Address(rsp, 96));
__ vinsertf128h(xmm7, Address(rsp,112));
__ vinsertf128h(xmm8, Address(rsp,128));
__ vinsertf128h(xmm9, Address(rsp,144));
__ vinsertf128h(xmm10, Address(rsp,160));
__ vinsertf128h(xmm11, Address(rsp,176));
__ vinsertf128h(xmm12, Address(rsp,192));
__ vinsertf128h(xmm13, Address(rsp,208));
__ vinsertf128h(xmm14, Address(rsp,224));
__ vinsertf128h(xmm15, Address(rsp,240));
__ addptr(rsp, 256);
}
#else
assert(!restore_vectors, "vectors are generated only by C2");
#endif
__ pop_CPU_state();
__ pop(rbp);
}
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
__ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
__ movptr(rax, Address(rsp, rax_offset_in_bytes()));
__ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
__ addptr(rsp, return_offset_in_bytes());
}
bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}
static int reg2offset_in(VMReg r) {
return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; // +4 slots skip the caller's pushed rbp and return address
}
static int reg2offset_out(VMReg r) {
return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
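// reg2offset_in() reads the caller's outgoing area through rbp, so it skips
// the pushed rbp and return address (2 words = 4 slots); reg2offset_out()
// writes this frame's outgoing area through rsp, where
// out_preserve_stack_slots() is 0. Worked example as a sketch (4-byte slots
// assumed): incoming stack slot 2 reads at rbp + (2+4)*4 = 24, outgoing
// slot 2 writes at rsp + (2+0)*4 = 8.
static int sketch_reg2offset(int stack_slot_index, int preserve_slots) {
  const int slot_size = 4; // assumed VMRegImpl::stack_slot_size
  return (stack_slot_index + preserve_slots) * slot_size;
}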
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed,
int is_outgoing) {
static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
};
static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
j_farg0, j_farg1, j_farg2, j_farg3,
j_farg4, j_farg5, j_farg6, j_farg7
};
uint int_args = 0;
uint fp_args = 0;
uint stk_args = 0; // inc by 2 each time
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_VOID:
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
regs[i].set_bad();
break;
case T_LONG:
assert(sig_bt[i + 1] == T_VOID, "expecting half"); // fall through
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS:
if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_FLOAT:
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_DOUBLE:
assert(sig_bt[i + 1] == T_VOID, "expecting half");
if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
default:
ShouldNotReachHere();
break;
}
}
return round_to(stk_args, 2);
}
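// The Java convention above is first-come-first-served over two independent
// register files (6 integer j_rargs, 8 float j_fargs); overflow goes to the
// stack in 2-slot units, and the T_VOID halves of longs/doubles get no
// location at all. A counting-only model of the policy, with the register
// limits as assumptions:
static int sketch_java_conv_stack_slots(const char* sig) { // 'I','J','F','D'
  int int_args = 0, fp_args = 0, stk_args = 0;
  for (const char* p = sig; *p; p++) {
    bool fp = (*p == 'F' || *p == 'D');
    int used = fp ? fp_args++ : int_args++;
    if (used >= (fp ? 8 : 6)) stk_args += 2; // spill: always two 32-bit slots
  }
  return stk_args; // already even, matching round_to(stk_args, 2)
}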
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ mov(r13, rsp);
__ movptr(rax, Address(rsp, 0));
__ andptr(rsp, -(StackAlignmentInBytes));
__ push_CPU_state();
if (frame::arg_reg_save_area_bytes != 0) {
__ subptr(rsp, frame::arg_reg_save_area_bytes);
}
__ mov(c_rarg0, rbx);
__ mov(c_rarg1, rax);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
if (frame::arg_reg_save_area_bytes != 0) {
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
__ pop_CPU_state();
__ mov(rsp, r13);
__ bind(L);
}
static void gen_c2i_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
Label& skip_fixup) {
patch_callers_callsite(masm);
__ bind(skip_fixup);
int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
extraspace = round_to(extraspace, 2*wordSize);
__ pop(rax);
__ mov(r13, rsp);
__ subptr(rsp, extraspace);
__ movptr(Address(rsp, 0), rax);
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
continue;
}
int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
int next_off = st_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
continue;
}
if (r_1->is_stack()) {
int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
if (!r_2->is_valid()) {
__ movl(rax, Address(rsp, ld_off));
__ movptr(Address(rsp, st_off), rax);
} else {
__ movq(rax, Address(rsp, ld_off));
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
__ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
__ mov64(rax, CONST64(0xdeadffffdeadaaaa));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
} else {
__ movq(Address(rsp, st_off), rax);
}
}
} else if (r_1->is_Register()) {
Register r = r_1->as_Register();
if (!r_2->is_valid()) {
__ movl(Address(rsp, st_off), r);
} else {
if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
#ifdef ASSERT
__ mov64(rax, CONST64(0xdeadffffdeadaaab));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movq(Address(rsp, next_off), r);
} else {
__ movptr(Address(rsp, st_off), r);
}
}
} else {
assert(r_1->is_XMMRegister(), "");
if (!r_2->is_valid()) {
__ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
} else {
#ifdef ASSERT
__ mov64(rax, CONST64(0xdeadffffdeadaaac));
__ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
}
}
}
__ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
__ jmp(rcx);
}
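// Interpreter argument slots above are laid out top-down: argument i lives
// at (total_args_passed - i) * Interpreter::stackElementSize, and the data
// word of a long/double goes to next_off, one element lower, while the
// st_off word is poisoned with the 0xdeadffff... patterns under ASSERT.
// Sketch of the offset math (stackElementSize assumed 8, as on x86_64):
static int sketch_c2i_st_off(int i, int total_args_passed) {
  const int stack_element_size = 8; // assumed Interpreter::stackElementSize
  return (total_args_passed - i) * stack_element_size;
}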
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
address code_start, address code_end,
Label& L_ok) {
Label L_fail;
__ lea(temp_reg, ExternalAddress(code_start));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::belowEqual, L_fail);
__ lea(temp_reg, ExternalAddress(code_end));
__ cmpptr(pc_reg, temp_reg);
__ jcc(Assembler::below, L_ok);
__ bind(L_fail);
}
static void gen_i2c_adapter(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs) {
__ movptr(rax, Address(rsp, 0));
if (VerifyAdapterCalls &&
(Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
__ block_comment("verify_i2c { ");
Label L_ok;
if (Interpreter::code() != NULL)
range_check(masm, rax, r11,
Interpreter::code()->code_start(), Interpreter::code()->code_end(),
L_ok);
if (StubRoutines::code1() != NULL)
range_check(masm, rax, r11,
StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
L_ok);
if (StubRoutines::code2() != NULL)
range_check(masm, rax, r11,
StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
L_ok);
const char* msg = "i2c adapter must return to an interpreter frame";
__ block_comment(msg);
__ stop(msg);
__ bind(L_ok);
__ block_comment("} verify_i2ce ");
}
__ movptr(r11, rsp);
int comp_words_on_stack = 0;
if (comp_args_on_stack) {
comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
comp_words_on_stack = round_to(comp_words_on_stack, 2);
__ subptr(rsp, comp_words_on_stack * wordSize);
}
__ andptr(rsp, -16);
__ push(rax);
const Register saved_sp = rax;
__ movptr(saved_sp, r11);
__ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
for (int i = 0; i < total_args_passed; i++) {
if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
continue;
}
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
int next_off = ld_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second();
if (!r_1->is_valid()) {
assert(!r_2->is_valid(), "");
continue;
}
if (r_1->is_stack()) {
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
if (!r_2->is_valid()) {
__ movl(r13, Address(saved_sp, ld_off));
__ movptr(Address(rsp, st_off), r13);
} else {
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
__ movq(r13, Address(saved_sp, offset));
__ movq(Address(rsp, st_off), r13);
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
assert(r != rax, "must be different");
if (r_2->is_valid()) {
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
__ movq(r, Address(saved_sp, offset));
} else {
__ movl(r, Address(saved_sp, ld_off));
}
} else {
if (!r_2->is_valid()) {
__ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
} else {
__ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
}
}
}
__ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
__ mov(rax, rbx);
__ jmp(r11);
}
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
int total_args_passed,
int comp_args_on_stack,
const BasicType *sig_bt,
const VMRegPair *regs,
AdapterFingerPrint* fingerprint) {
address i2c_entry = __ pc();
gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Label ok;
Register holder = rax;
Register receiver = j_rarg0;
Register temp = rbx;
{
__ load_klass(temp, receiver);
__ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ jcc(Assembler::equal, ok);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
address c2i_entry = __ pc();
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
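// The unverified c2i entry above makes two checks before falling into the
// verified path, summarized here as a sketch (plain pointers stand in for
// the Klass* and the Method's code field):
static bool sketch_c2i_unverified_ok(const void* receiver_klass,
                                     const void* ic_holder_klass,
                                     const void* method_code) {
  // A mismatched inline-cache klass, or a method that has been compiled
  // since the call site was patched to interpreted, both route to the
  // ic-miss stub so the call site gets corrected.
  return receiver_klass == ic_holder_klass && method_code == NULL;
}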
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
assert(regs2 == NULL, "not needed on x86");
#ifdef _WIN64
static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
c_rarg0, c_rarg1, c_rarg2, c_rarg3
};
static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
c_farg0, c_farg1, c_farg2, c_farg3
};
#else
static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
};
static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
c_farg0, c_farg1, c_farg2, c_farg3,
c_farg4, c_farg5, c_farg6, c_farg7
};
#endif // _WIN64
uint int_args = 0;
uint fp_args = 0;
uint stk_args = 0; // inc by 2 each time
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
fp_args++;
stk_args += 2;
#endif
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_LONG:
assert(sig_bt[i + 1] == T_VOID, "expecting half"); // fall through
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS:
case T_METADATA:
if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
fp_args++;
stk_args += 2;
#endif
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_FLOAT:
if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
int_args++;
stk_args += 2;
#endif
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_DOUBLE:
assert(sig_bt[i + 1] == T_VOID, "expecting half");
if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
int_args++;
stk_args += 2;
#endif
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
break;
case T_VOID: // Halves of longs and doubles
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
regs[i].set_bad();
break;
default:
ShouldNotReachHere();
break;
}
}
#ifdef _WIN64
if (stk_args < 8) {
stk_args = 8;
}
#endif // _WIN64
return stk_args;
}
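// The #ifdef _WIN64 bumps above encode the key ABI difference: Windows x64
// gives each parameter one position shared across both register files
// (RCX/XMM0, RDX/XMM1, ...) and reserves a stack home slot pair for every
// parameter plus at least 32 bytes (8 slots) of shadow space, while SysV
// advances integer and float registers independently. Counting-only sketch
// (register limits 4/4 vs. 6/8 assumed):
static int sketch_c_conv_stack_slots(int n_int, int n_fp, bool win64) {
  if (win64) {
    int stk = (n_int + n_fp) * 2; // home slots for every parameter
    return (stk < 8) ? 8 : stk;   // shadow-space minimum, as above
  }
  int stk = 0;
  if (n_int > 6) stk += (n_int - 6) * 2; // SysV: files advance independently
  if (n_fp  > 8) stk += (n_fp  - 8) * 2;
  return stk;
}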
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ movslq(rax, Address(rbp, reg2offset_in(src.first())));
__ movq(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
__ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
__ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
if (dst.first() != src.first()) {
__ movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ movq(rax, Address(rbp, reg2offset_in(src.first())));
__ movq(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
__ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
__ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
if (dst.first() != src.first()) {
__ movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}
static void object_move(MacroAssembler* masm,
OopMap* map,
int oop_handle_offset,
int framesize_in_slots,
VMRegPair src,
VMRegPair dst,
bool is_receiver,
int* receiver_offset) {
Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
if (src.first()->is_stack()) {
int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
if (is_receiver) {
*receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
}
__ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
__ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
__ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
} else {
const Register rOop = src.first()->as_Register();
int oop_slot;
if (rOop == j_rarg0)
oop_slot = 0;
else if (rOop == j_rarg1)
oop_slot = 1;
else if (rOop == j_rarg2)
oop_slot = 2;
else if (rOop == j_rarg3)
oop_slot = 3;
else if (rOop == j_rarg4)
oop_slot = 4;
else {
assert(rOop == j_rarg5, "wrong register");
oop_slot = 5;
}
oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
int offset = oop_slot*VMRegImpl::stack_slot_size;
map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ movptr(Address(rsp, offset), rOop);
if (is_receiver) {
*receiver_offset = offset;
}
__ cmpptr(rOop, (int32_t)NULL_WORD);
__ lea(rHandle, Address(rsp, offset));
__ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
}
if (dst.first()->is_stack()) {
__ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
}
}
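// JNI receives handles rather than raw oops: the oop is spilled to a stack
// slot recorded in the OopMap, and the argument becomes that slot's address,
// or NULL when the oop itself is NULL. The cmpptr/lea/cmovptr triple above
// computes this branch-free; its C-level meaning, as a sketch:
static void** sketch_handleize(void** slot_holding_oop) {
  // lea produces &slot unconditionally; the cmov overwrites it with the
  // NULL stored in the slot when the oop compared equal to NULL.
  return (*slot_holding_oop != NULL) ? slot_holding_oop : (void**)NULL;
}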
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ movl(rax, Address(rbp, reg2offset_in(src.first())));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
__ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
__ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
} else {
if ( src.first() != dst.first()) {
__ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
}
}
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
if (dst.first() != src.first()) {
__ mov(dst.first()->as_Register(), src.first()->as_Register());
}
} else {
assert(dst.is_single_reg(), "not a stack pair");
__ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
}
} else if (dst.is_single_phys_reg()) {
assert(src.is_single_reg(), "not a stack pair");
__ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
} else {
assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
__ movq(rax, Address(rbp, reg2offset_in(src.first())));
__ movq(Address(rsp, reg2offset_out(dst.first())), rax);
}
}
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
if ( src.first() != dst.first()) {
__ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
} else {
assert(dst.is_single_reg(), "not a stack pair");
__ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
}
} else if (dst.is_single_phys_reg()) {
assert(src.is_single_reg(), "not a stack pair");
__ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
} else {
assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
__ movq(rax, Address(rbp, reg2offset_in(src.first())));
__ movq(Address(rsp, reg2offset_out(dst.first())), rax);
}
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
switch (ret_type) {
case T_FLOAT:
__ movflt(Address(rbp, -wordSize), xmm0);
break;
case T_DOUBLE:
__ movdbl(Address(rbp, -wordSize), xmm0);
break;
case T_VOID: break;
default: {
__ movptr(Address(rbp, -wordSize), rax);
}
}
}
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
switch (ret_type) {
case T_FLOAT:
__ movflt(xmm0, Address(rbp, -wordSize));
break;
case T_DOUBLE:
__ movdbl(xmm0, Address(rbp, -wordSize));
break;
case T_VOID: break;
default: {
__ movptr(rax, Address(rbp, -wordSize));
}
}
}
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
__ push(args[i].first()->as_Register());
} else if (args[i].first()->is_XMMRegister()) {
__ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
}
}
}
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
if (args[i].first()->is_Register()) {
__ pop(args[i].first()->as_Register());
} else if (args[i].first()->is_XMMRegister()) {
__ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
}
}
static void save_or_restore_arguments(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMap* map,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
int slot = arg_save_area;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
int offset = slot * VMRegImpl::stack_slot_size;
slot += VMRegImpl::slots_per_word;
assert(slot <= stack_slots, "overflow");
if (map != NULL) {
__ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
if (in_regs[i].first()->is_Register() &&
(in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
int offset = slot * VMRegImpl::stack_slot_size;
if (map != NULL) {
__ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
if (in_sig_bt[i] == T_ARRAY) {
map->set_oop(VMRegImpl::stack2reg(slot));
}
} else {
__ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
}
slot += VMRegImpl::slots_per_word;
}
}
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
int offset = slot * VMRegImpl::stack_slot_size;
slot++;
assert(slot <= stack_slots, "overflow");
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (map != NULL) {
__ movl(Address(rsp, offset), reg);
} else {
__ movl(reg, Address(rsp, offset));
}
break;
case T_ARRAY:
case T_LONG:
break;
case T_OBJECT:
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
if (in_sig_bt[i] == T_FLOAT) {
int offset = slot * VMRegImpl::stack_slot_size;
slot++;
assert(slot <= stack_slots, "overflow");
if (map != NULL) {
__ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
} else if (in_regs[i].first()->is_stack()) {
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
}
}
}
}
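// Slot accounting in the two passes above: 8-byte values (doubles, longs,
// arrays-as-pointers) claim slots_per_word slots first so they stay
// word-aligned, then 4-byte values claim single slots. Sketch of the budget
// (2 slots per 64-bit word assumed):
static int sketch_arg_save_slots(int n_wide, int n_narrow) {
  const int slots_per_word = 2; // 8-byte word / 4-byte slot
  return n_wide * slots_per_word + n_narrow;
}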
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
int stack_slots,
int total_c_args,
int total_in_args,
int arg_save_area,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
address the_pc = __ pc();
oop_maps->add_gc_map( __ offset(), map);
__ set_last_Java_frame(rsp, noreg, the_pc);
__ block_comment("block_for_jni_critical");
__ movptr(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
__ reset_last_Java_frame(false);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
for (int i = 0; i < total_in_args - 1; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
__ xorptr(reg, reg);
} else if (in_regs[i].first()->is_XMMRegister()) {
__ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
} else if (in_regs[i].first()->is_stack()) {
} else {
ShouldNotReachHere();
}
if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
i++;
}
}
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
}
#endif
}
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
Register tmp_reg = rax;
assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
"possible collision");
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");
__ block_comment("unpack_array_argument {");
Label is_null, done;
VMRegPair tmp;
tmp.set_ptr(tmp_reg->as_VMReg());
if (reg.first()->is_stack()) {
move_ptr(masm, reg, tmp);
reg = tmp;
}
__ testptr(reg.first()->as_Register(), reg.first()->as_Register());
__ jccb(Assembler::equal, is_null);
__ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
move_ptr(masm, tmp, body_arg);
__ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
arrayOopDesc::base_offset_in_bytes(in_elem_type)));
move32_64(masm, tmp, length_arg);
__ jmpb(done);
__ bind(is_null);
__ xorptr(tmp_reg, tmp_reg);
move_ptr(masm, tmp, body_arg);
move32_64(masm, tmp, length_arg);
__ bind(done);
__ block_comment("} unpack_array_argument");
}
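// For a critical native, each Java array parameter therefore arrives as a
// (body pointer, jint length) pair, with a null array becoming (NULL, 0).
// C-level sketch of the two paths above (the base-offset lea is elided):
static void sketch_unpack_array(char* array_body, int array_length,
                                char** out_body, int* out_length) {
  if (array_body != NULL) {  // lea of the first element plus length load
    *out_body = array_body;
    *out_length = array_length;
  } else {                   // is_null path: zero both outputs
    *out_body = NULL;
    *out_length = 0;
  }
}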
class ComputeMoveOrder: public StackObj {
class MoveOperation: public ResourceObj {
friend class ComputeMoveOrder;
private:
VMRegPair _src;
VMRegPair _dst;
int _src_index;
int _dst_index;
bool _processed;
MoveOperation* _next;
MoveOperation* _prev;
static int get_id(VMRegPair r) {
return r.first()->value();
}
public:
MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
_src(src)
, _src_index(src_index)
, _dst(dst)
, _dst_index(dst_index)
, _next(NULL)
, _prev(NULL)
, _processed(false) {
}
VMRegPair src() const { return _src; }
int src_id() const { return get_id(src()); }
int src_index() const { return _src_index; }
VMRegPair dst() const { return _dst; }
void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
int dst_index() const { return _dst_index; }
int dst_id() const { return get_id(dst()); }
MoveOperation* next() const { return _next; }
MoveOperation* prev() const { return _prev; }
void set_processed() { _processed = true; }
bool is_processed() const { return _processed; }
void break_cycle(VMRegPair temp_register) {
MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
MoveOperation* p = prev();
assert(p->next() == this, "must be");
_prev = NULL;
p->_next = new_store;
new_store->_prev = p;
set_dst(-1, temp_register);
}
void link(GrowableArray<MoveOperation*>& killer) {
MoveOperation* n = killer.at_grow(src_id(), NULL);
if (n != NULL) {
assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
_next = n;
n->_prev = this;
}
}
};
private:
GrowableArray<MoveOperation*> edges;
public:
ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
if (in_sig_bt[i] == T_ARRAY) {
c_arg--;
if (out_regs[c_arg].first()->is_stack() &&
out_regs[c_arg + 1].first()->is_stack()) {
arg_order.push(i);
arg_order.push(c_arg);
} else {
if (out_regs[c_arg].first()->is_stack() ||
in_regs[i].first() == out_regs[c_arg].first()) {
add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
} else {
add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
}
}
} else if (in_sig_bt[i] == T_VOID) {
arg_order.push(i);
arg_order.push(c_arg);
} else {
if (out_regs[c_arg].first()->is_stack() ||
in_regs[i].first() == out_regs[c_arg].first()) {
arg_order.push(i);
arg_order.push(c_arg);
} else {
add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
}
}
}
GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
for (int i = 0; i < stores->length(); i++) {
arg_order.push(stores->at(i)->src_index());
arg_order.push(stores->at(i)->dst_index());
}
}
void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
if (src.first() == dst.first()) return;
edges.append(new MoveOperation(src_index, src, dst_index, dst));
}
GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
GrowableArray<MoveOperation*> killer;
for (int i = 0; i < edges.length(); i++) {
MoveOperation* s = edges.at(i);
assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
killer.at_put_grow(s->dst_id(), s, NULL);
}
assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
"make sure temp isn't in the registers that are killed");
for (int i = 0; i < edges.length(); i++) {
edges.at(i)->link(killer);
}
GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
for (int e = 0; e < edges.length(); e++) {
MoveOperation* s = edges.at(e);
if (!s->is_processed()) {
MoveOperation* start = s;
while (start->prev() != NULL && start->prev() != s) {
start = start->prev();
}
if (start->prev() == s) {
start->break_cycle(temp_register);
}
while (start != NULL) {
stores->append(start);
start->set_processed();
start = start->next();
}
}
}
return stores;
}
};
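// ComputeMoveOrder solves the classic parallel-move problem: moves whose
// destination feeds another move's source form chains (emitted from the
// tail) or cycles (broken with the temp register, as in break_cycle()).
// Smallest worked instance, the two-element cycle A->B, B->A:
static void sketch_swap_via_temp(long& a, long& b) {
  long t = b; // break_cycle: divert one edge's destination into the temp
  b = a;      // the remaining edge of the cycle can now be emitted
  a = t;      // the store queued by break_cycle drains the temp
}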
static void verify_oop_args(MacroAssembler* masm,
methodHandle method,
const BasicType* sig_bt,
const VMRegPair* regs) {
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < method->size_of_parameters(); i++) {
if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
__ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
static void gen_special_dispatch(MacroAssembler* masm,
methodHandle method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
vmIntrinsics::ID iid = method->intrinsic_id();
bool has_receiver = false;
Register receiver_reg = noreg;
int member_arg_pos = -1;
Register member_reg = noreg;
int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
if (ref_kind != 0) {
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (iid == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} else {
fatal(err_msg_res("unexpected intrinsic id %d", iid));
}
if (member_reg != noreg) {
SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
VMReg r = regs[member_arg_pos].first();
if (r->is_stack()) {
__ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
member_reg = r->as_Register();
}
}
if (has_receiver) {
assert(method->size_of_parameters() > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) {
fatal("receiver always in a register");
receiver_reg = j_rarg0; // known to be free at this point
__ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
} else {
receiver_reg = r->as_Register();
}
}
MethodHandles::generate_method_handle_dispatch(masm, iid,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}