C:\hotspot-69087d08d473\src\cpu\x86\vm/c1_LIRGenerator_x86.cpp


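// Compare-and-swap intrinsic. x86 cmpxchg takes its compare operand in a fixed
// register (rax, or a fixed register pair for 64-bit values on 32-bit VMs), so
// the expected value is force-loaded there before the cas_* LIR op is emitted.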
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");
  obj.load_item();
  offset.load_nonconstant();
  if (type == objectType) {
    cmp.load_item_force(FrameMap::rax_oop_opr);
    val.load_item();
  } else if (type == intType) {
    cmp.load_item_force(FrameMap::rax_opr);
    val.load_item();
  } else if (type == longType) {
    cmp.load_item_force(FrameMap::long0_opr);
    val.load_item_force(FrameMap::long1_opr);
  } else {
    ShouldNotReachHere();
  }
  LIR_Opr addr = new_pointer_register();
  LIR_Address* a;
  if (offset.result()->is_constant()) {
#ifdef _LP64
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
#else
    a = new LIR_Address(obj.result(),
                        offset.result()->as_jint(),
                        as_BasicType(type));
#endif
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  __ leal(LIR_OprFact::address(a), addr);
  if (type == objectType) {  // Write-barrier needed for Object fields.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {   // Write-barrier needed for Object fields.
    post_barrier(addr, val.result());
  }
}
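// Math intrinsics. Even with SSE2 the transcendental operations (sin, cos, tan,
// log, log10, exp, pow) are evaluated on the x87 FPU, so when use_fpu is set the
// inputs and result are shuttled through the fpu0 stack slot.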
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
  LIRItem value(x->argument_at(0), this);
  bool use_fpu = false;
  if (UseSSE >= 2) {
    switch (x->id()) {
      case vmIntrinsics::_dsin:
      case vmIntrinsics::_dcos:
      case vmIntrinsics::_dtan:
      case vmIntrinsics::_dlog:
      case vmIntrinsics::_dlog10:
      case vmIntrinsics::_dexp:
      case vmIntrinsics::_dpow:
        use_fpu = true;
    }
  } else {
    value.set_destroys_register();
  }
  value.load_item();
  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input2 = NULL;
  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem extra_arg(x->argument_at(1), this);
    if (UseSSE < 2) {
      extra_arg.set_destroys_register();
    }
    extra_arg.load_item();
    calc_input2 = extra_arg.result();
  }
  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::caller_save_fpu_reg_at(0);
  LIR_Opr tmp2 = FrameMap::caller_save_fpu_reg_at(1);
  if (use_fpu) {
    LIR_Opr tmp = FrameMap::fpu0_double_opr;
    int tmp_start = 1;
    if (calc_input2 != NULL) {
      __ move(calc_input2, tmp);
      tmp_start = 2;
      calc_input2 = tmp;
    }
    __ move(calc_input, tmp);
    calc_input = tmp;
    calc_result = tmp;
    tmp1 = FrameMap::caller_save_fpu_reg_at(tmp_start);
    tmp2 = FrameMap::caller_save_fpu_reg_at(tmp_start + 1);
  }
  switch (x->id()) {
    case vmIntrinsics::_dabs:   __ abs  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsqrt:  __ sqrt (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsin:   __ sin  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dcos:   __ cos  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dtan:   __ tan  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dlog:   __ log  (calc_input, calc_result, tmp1);                    break;
    case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, tmp1);                    break;
    case vmIntrinsics::_dexp:   __ exp  (calc_input, calc_result,              tmp1, tmp2, FrameMap::rax_opr, FrameMap::rcx_opr, FrameMap::rdx_opr); break;
    case vmIntrinsics::_dpow:   __ pow  (calc_input, calc_input2, calc_result, tmp1, tmp2, FrameMap::rax_opr, FrameMap::rcx_opr, FrameMap::rdx_opr); break;
    default:                    ShouldNotReachHere();
  }
  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}
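// System.arraycopy intrinsic: all five arguments are force-loaded into the fixed
// registers expected by the arraycopy stub (the Java argument registers on LP64).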
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);
#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else
  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));
  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
#endif // LP64
  set_no_result(x);
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
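// CRC32 intrinsics: the single-byte update is emitted inline, while the byte-array
// and ByteBuffer variants compute the buffer address and call the
// StubRoutines::updateBytesCRC32() leaf stub through a C calling convention.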
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
#ifndef _LP64
      if (!is_updateBytes) { // long buf is a raw address
         base_op = new_register(T_INT);
         __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif
      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       LIR_Address::times_1,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());
      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);
      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));
      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}
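// Fixed register conventionally used for a value of the given basic type.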
LIR_Opr fixed_register_for(BasicType type) {
  switch (type) {
    case T_FLOAT:  return FrameMap::fpu0_float_opr;
    case T_DOUBLE: return FrameMap::fpu0_double_opr;
    case T_INT:    return FrameMap::rax_opr;
    case T_LONG:   return FrameMap::long0_opr;
    default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
}
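// Primitive conversions. The table below decides, per bytecode, whether the input
// or result must live in a fixed register, whether the result must be rounded
// through memory (x87 only), and whether a ConversionStub is needed for the
// overflowing f2i/d2i cases.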
void LIRGenerator::do_Convert(Convert* x) {
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;
  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }
  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }
  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }
  __ convert(x->op(), conv_input, conv_result, stub);
  if (result != conv_result) {
    __ move(conv_result, result);
  }
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
                       FrameMap::rcx_oop_opr,
                       FrameMap::rdi_oop_opr,
                       FrameMap::rsi_oop_opr,
                       LIR_OprFact::illegalOpr,
                       FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info =  state_for(x, x->state_before());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
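// Multi-dimensional array allocation: the dimension sizes are stored as stack
// parameters, then Runtime1::new_multi_array_id is called with the klass, the
// rank and a pointer (rsp) to the dimension array.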
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }
  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);
  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
}
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
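// Conditional branches. Long compares can only be emitted for eql/neq/lss/geq,
// so gtr/leq are handled by mirroring the condition and swapping the operands;
// the long comparison destroys its left operand. A safepointed backedge also
// increments the backedge counter before the branch.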
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();
  If::Condition cond = x->cond();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  if (tag == longTag) {
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    yin->load_item();
  } else {
    yin->dont_load_item();
  }
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);
  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}
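// A volatile 64-bit store must be a single atomic access on 32-bit x86, so the
// long value is spilled to memory and moved through a double register (FPU or
// XMM) using volatile_move.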
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    __ load(address, result, info);
  }
}
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    __ load(addr, tmp);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(tmp, spill);
    __ move(spill, dst);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    __ load(addr, dst);
  }
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      __ move(data, addr);
    }
  }
}
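// Unsafe getAndSet/getAndAdd: lowered to lock xadd or xchg on the computed
// address, with GC pre/post barriers around the exchange when the value is an oop.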
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);
  src.load_item();
  value.load_item();
  off.load_nonconstant();
  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();
  assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Address* addr;
  if (offset->is_constant()) {
#ifdef _LP64
    jlong c = offset->as_jlong();
    if ((jlong)((jint)c) == c) {
      addr = new LIR_Address(src.result(), (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset, tmp);
      addr = new LIR_Address(src.result(), tmp, type);
    }
#else
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
#endif
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }
  __ move(data, dst);
  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
  } else {
    if (is_obj) {
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
    if (is_obj) {
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/c1_MacroAssembler_x86.cpp
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;
  verify_oop(obj);
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }
  movptr(hdr, Address(obj, hdr_offset));
  orptr(hdr, markOopDesc::unlocked_value);
  movptr(Address(disp_hdr, 0), hdr);
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  subptr(hdr, rsp);
  andptr(hdr, aligned_mask - os::vm_page_size());
  movptr(Address(disp_hdr, 0), hdr);
  jcc(Assembler::notZero, slow_case);
  bind(done);
  return null_check_offset;
}
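// Fast-path monitor exit: a zero displaced header means a recursive lock and
// nothing to do; otherwise the displaced mark word is swapped back into the
// object's header with cmpxchg.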
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  if (UseBiasedLocking) {
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }
  movptr(hdr, Address(disp_hdr, 0));
  testptr(hdr, hdr);
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(hdr, Address(obj, hdr_offset));
  jcc(Assembler::notEqual, slow_case);
  bind(done);
}
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
  }
}
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }
  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}
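// Zero the body of a freshly allocated object: the area past the header is
// cleared in 8-byte chunks, counting index down to zero (two word stores per
// iteration on 32-bit).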
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  subptr(index, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
#ifdef ASSERT
  { Label L;
    testptr(index, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif
  xorptr(t1, t1);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  { Label even;
    jcc(Assembler::carryClear, even);
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  { Label loop;
    bind(loop);
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }
  bind(done);
}
void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);
  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
}
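// Initialize the header and zero the body. Small constant sizes (up to the
// ~6-word threshold) are cleared with unrolled stores; larger or variable sizes
// use a loop.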
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
  initialize_header(obj, klass, noreg, t1, t2);
  const Register t1_zero = t1;
  const Register index = t2;
  const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
  if (var_size_in_bytes != noreg) {
    mov(index, var_size_in_bytes);
    initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
  } else if (con_size_in_bytes <= threshold) {
    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      movptr(Address(obj, i), t1_zero);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
    movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
    if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
      movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
    { Label loop;
      bind(loop);
      movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
             t1_zero);
      NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
             t1_zero);)
      decrement(index);
      jcc(Assembler::notZero, loop);
    }
  }
  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }
  verify_oop(obj);
}
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
  cmpptr(len, (int32_t)max_array_allocation_length);
  jcc(Assembler::above, slow_case);
  const Register arr_size = t2; // okay to be the same
  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);
  try_allocate(obj, arr_size, 0, t1, t2, slow_case);
  initialize_header(obj, klass, len, t1, t2);
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }
  verify_oop(obj);
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();
  if (UseCompressedClassPointers) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  generate_stack_overflow_check(bang_size_in_bytes);
  push(rbp);
  if (PreserveFramePointer) {
    mov(rbp, rsp);
  }
#ifdef TIERED
  if (UseSSE < 2 ) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
}
void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes);  // Does not emit code for frame_size == 0
  pop(rbp);
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
  if (C1Breakpoint) int3();
  inline_cache_check(receiver, ic_klass);
}
void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
    fat_nop();
  }
  if (C1Breakpoint) int3();
  verify_FPU(0, "method_entry");
}
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}
void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}
void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}
#endif // ifndef PRODUCT
C:\hotspot-69087d08d473\src\cpu\x86\vm/c1_MacroAssembler_x86.hpp
#ifndef CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
 private:
  int _rsp_offset;    // track rsp changes
  void pd_init() { _rsp_offset = 0; }
 public:
  void try_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void initialize_header(Register obj, Register klass, Register len, Register t1, Register t2);
  void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1);
  int lock_object  (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
  void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
  void initialize_object(
    Register obj,                      // result: pointer to object after successful allocation
    Register klass,                    // object klass
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2                        // temp register
  );
  void allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case);
  enum {
    max_array_allocation_length = 0x00FFFFFF
  };
  void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case);
  int  rsp_offset() const { return _rsp_offset; }
  void set_rsp_offset(int n) { _rsp_offset = n; }
  void push_jint (jint i)     { _rsp_offset++; push(i); }
  void push_oop  (jobject o)  { _rsp_offset++; pushoop(o); }
  void push_addr (Address a)  { _rsp_offset++; pushptr(a); }
  void push_reg  (Register r) { _rsp_offset++; push(r); }
  void pop_reg   (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); }
  void dec_stack (int nof_words) {
    _rsp_offset -= nof_words;
    assert(_rsp_offset >= 0, "stack offset underflow");
    addptr(rsp, wordSize * nof_words);
  }
  void dec_stack_after_call (int nof_words) {
    _rsp_offset -= nof_words;
    assert(_rsp_offset >= 0, "stack offset underflow");
  }
  void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN;
#endif // CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/c1_Runtime1_x86.cpp
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
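// Call a VM runtime entry from stub code: pass the current thread as the implicit
// first argument, record the last Java frame, and on return forward any pending
// exception before delivering the oop/metadata results.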
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif
#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);
  get_thread(thread);
  push(thread);
#endif // _LP64
  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true);
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}
class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;
 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);
  ~StubFrame();
};
#define __ _sasm->
StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}
#undef __
#define __ sasm->
const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64
enum reg_save_layout {
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};
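// Build an OopMap describing the stack slot at which each register is saved in
// the reg_save_layout frame set up by save_live_registers().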
static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64
  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off +     num_rt_args), fpu_name_0);
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }
    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off +     num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }
  return map;
}
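// Save all registers around a runtime call: pusha for the integer registers,
// followed by the x87 or XMM state depending on UseSSE.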
static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");
  __ pusha();         // integer registers
  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif
  if (save_fpu_registers) {
    if (UseSSE < 2) {
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();
#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }
    if (UseSSE >= 2) {
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }
  __ verify_FPU(0, "save_live_registers");
  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
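      // only the float halves were saved without SSE2; reload them as floats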
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }
    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      __ verify_FPU(0, "restore_live_registers");
    }
  } else {
    __ verify_FPU(0, "restore_live_registers");
  }
#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT
  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}
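// Same as restore_live_registers, but leaves rax untouched so stubs that
// return their result in rax (e.g. the allocation stubs) keep it.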
static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");
  restore_fpu(sasm, restore_fpu_registers);
#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // slot 11 (rsp) is deliberately skipped
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));
  // slot 15 (rax) is deliberately skipped so the stub's result survives
  __ addptr(rsp, 16 * wordSize);
#else
  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}
void Runtime1::initialize_pd() {
}
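// Emits a stub that saves all live registers, optionally forwards one
// argument from the caller's frame, and calls 'target' in the VM to throw
// an exception; control never returns here.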
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);
  __ invalidate_registers(true, true, true, true, true, true);
  const Register temp_reg = rbx;
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ stop("should not reach here");
  return oop_maps;
}
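// Common exception-handler entry: records the exception oop and pc in the
// thread, asks exception_handler_for_pc for the handler of the calling
// nmethod, patches the return address with it, and finally restores the
// registers (or pops the callee frame for handle_exception_from_callee_id).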
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    oop_map = generate_oop_map(sasm, 1 /*thread*/);
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }
#ifdef TIERED
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED
  __ invalidate_registers(false, true, true, false, true, true);
  __ verify_not_null_oop(exception_oop);
  NOT_LP64(__ get_thread(thread);)
#ifdef ASSERT
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);
  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);
  __ invalidate_registers(false, true, true, true, true, true);
  __ movptr(Address(rbp, 1*BytesPerWord), rax);
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    __ leave();
    __ pop(rcx);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }
  return oop_maps;
}
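// Removes the activation of a method that has no exception handler and
// jumps to the caller's handler; the exception oop is kept in a callee-saved
// register across the call to exception_handler_for_return_address.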
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  const Register exception_oop = rax;
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  __ invalidate_registers(false, true, true, true, true, true);
#ifdef ASSERT
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);
  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif
  __ empty_FPU_stack();
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);
  NOT_LP64(__ get_thread(thread);)
  __ movptr(exception_pc, Address(rsp, 0));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  __ invalidate_registers(false, true, true, true, false, true);
  __ movptr(handler_addr, rax);
  __ movptr(exception_oop, exception_oop_callee_saved);
  __ verify_not_null_oop(exception_oop);
  __ pop(exception_pc);
  __ jmp(handler_addr);
}
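// Common code for the patching stubs: calls 'target' in the VM to patch the
// calling code, then either returns normally, reexecutes through the
// deoptimization blob, or forwards a pending exception.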
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  const int num_rt_args = 2;  // thread + dummy
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);
#ifdef _LP64
  const Register thread = r15_thread;
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy
  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
    __ verify_not_null_oop(rax);
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));
#ifdef ASSERT
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);
    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);
    restore_live_registers(sasm);
    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));
    __ bind(L);
  }
  Label reexecuteEntry, cont;
  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);
  return oop_maps;
}
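// Emits the body of the C1 runtime stub identified by 'id'.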
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;
  bool save_fpu_registers = true;
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);
          __ push(rdi);
          __ push(rbx);
          if (id == fast_new_instance_init_check_id) {
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }
#ifdef ASSERT
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
          __ bind(retry_tlab);
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);
          __ bind(try_eden);
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);
          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }
        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);
      }
      break;
    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length   = rbx; // Incoming
        Register klass    = rdx; // Incoming
        Register obj      = rax; // Result
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }
#ifdef ASSERT
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT
        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
          __ bind(retry_tlab);
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);
          __ bind(try_eden);
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);
          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);
          __ bind(slow_path);
        }
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);
      }
      break;
    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(rax);
      }
      break;
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);
        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;
    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;
    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
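    // Slow path of the subtype check: sub and super klass are passed on the
    // stack and the 0/1 result is written back into the deepest argument
    // slot, as described by the 'layout' enum below.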
    case slow_subtype_check_id:
      {
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };
        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;
    case monitorenter_nofpu_id:
      save_fpu_registers = false;  // fall through to monitorenter_id
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
        f.load_argument(1, rax); // rax,: object
        f.load_argument(0, rbx); // rbx,: lock address
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
    case monitorexit_nofpu_id:
      save_fpu_registers = false;  // fall through to monitorexit_id
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
        f.load_argument(0, rax); // rax,: lock address
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 1;  // thread
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;
    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;
    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;
    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;
    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
    case dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        save_live_registers(sasm, 1);
        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));
        restore_live_registers(sasm);
      }
      break;
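    // Converts the double in st0 to a jlong (rdx:rax on 32-bit, rax on
    // 64-bit) with Java (long) cast semantics: NaN becomes 0, out-of-range
    // values become min_jlong or max_jlong.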
    case fpu2long_stub_id:
      {
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)
        Label return0, do_return, return_min_jlong, do_convert;
        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);
        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);
        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        __ movptr(rax, result_low_word);
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);
        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, CONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);
        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx,rdx);
        __ xorptr(rax,rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64
        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#if INCLUDE_ALL_GCS
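    // G1 SATB pre-barrier slow path: enqueues the previous field value on
    // the thread's SATB mark queue, calling into the runtime when the queue
    // buffer is full.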
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);
        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;
        NOT_LP64(__ get_thread(thread);)
        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));
        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
        Label done;
        Label runtime;
#ifdef _LP64
        __ movslq(tmp, queue_index);
        __ cmpq(tmp, 0);
#else
        __ cmpl(queue_index, 0);
#endif
        __ jcc(Assembler::equal, runtime);
#ifdef _LP64
        __ subq(tmp, wordSize);
        __ movl(queue_index, tmp);
        __ addq(tmp, buffer);
#else
        __ subl(queue_index, wordSize);
        __ movl(tmp, buffer);
        __ addl(tmp, queue_index);
#endif
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);
        __ bind(runtime);
        save_live_registers(sasm, 3);
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
        restore_live_registers(sasm);
        __ bind(done);
        __ pop(rdx);
        __ pop(rax);
      }
      break;
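    // G1 post-barrier slow path: dirties the card for the store address and
    // enqueues it on the thread's dirty card queue, calling into the runtime
    // when the queue buffer is full.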
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
        Address store_addr(rbp, 2*BytesPerWord);
        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
        Label done;
        Label runtime;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
        __ push(rax);
        __ push(rcx);
        const Register cardtable = rax;
        const Register card_addr = rcx;
        f.load_argument(0, card_addr);
        __ shrptr(card_addr, CardTableModRefBS::card_shift);
        __ movptr(cardtable, (intptr_t)ct->byte_map_base);
        __ addptr(card_addr, cardtable);
        NOT_LP64(__ get_thread(thread);)
        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ jcc(Assembler::equal, done);
        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
        __ jcc(Assembler::equal, done);
        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
        __ cmpl(queue_index, 0);
        __ jcc(Assembler::equal, runtime);
        __ subl(queue_index, wordSize);
        const Register buffer_addr = rbx;
        __ push(rbx);
        __ movptr(buffer_addr, buffer);
#ifdef _LP64
        __ movslq(rscratch1, queue_index);
        __ addptr(buffer_addr, rscratch1);
#else
        __ addptr(buffer_addr, queue_index);
#endif
        __ movptr(Address(buffer_addr, 0), card_addr);
        __ pop(rbx);
        __ jmp(done);
        __ bind(runtime);
        __ push(rdx);
        save_live_registers(sasm, 3);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        restore_live_registers(sasm);
        __ pop(rdx);
        __ bind(done);
        __ pop(rcx);
        __ pop(rax);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;
    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}
#undef __
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/c2_globals_x86.hpp
#ifndef CPU_X86_VM_C2_GLOBALS_X86_HPP
#define CPU_X86_VM_C2_GLOBALS_X86_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
define_pd_global(bool, BackgroundCompilation,        true);
define_pd_global(bool, UseTLAB,                      true);
define_pd_global(bool, ResizeTLAB,                   true);
define_pd_global(bool, CICompileOSR,                 true);
define_pd_global(bool, InlineIntrinsics,             true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps,                 true);
define_pd_global(bool, UseOnStackReplacement,        true);
#ifdef CC_INTERP
define_pd_global(bool, ProfileInterpreter,           false);
#else
define_pd_global(bool, ProfileInterpreter,           true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation,            trueInTiered);
define_pd_global(intx, CompileThreshold,             10000);
define_pd_global(intx, BackEdgeThreshold,            100000);
define_pd_global(intx, OnStackReplacePercentage,     140);
define_pd_global(intx, ConditionalMoveLimit,         3);
define_pd_global(intx, FLOATPRESSURE,                6);
define_pd_global(intx, FreqInlineSize,               325);
define_pd_global(intx, MinJumpTableSize,             10);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE,                  13);
define_pd_global(intx, InteriorEntryAlignment,       16);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit,              60);
define_pd_global(intx, InitialCodeCacheSize,         2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize,       64*K);
define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
#else
define_pd_global(intx, INTPRESSURE,                  6);
define_pd_global(intx, InteriorEntryAlignment,       4);
define_pd_global(intx, NewSizeThreadIncrease,        4*K);
define_pd_global(intx, LoopUnrollLimit,              50);     // Design center runs on 1.3.1
define_pd_global(intx, InitialCodeCacheSize,         2304*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize,       32*K);
define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
#endif // AMD64
define_pd_global(intx, RegisterCostAreaRatio,        16000);
define_pd_global(bool, OptoPeephole,                 true);
define_pd_global(bool, UseCISCSpill,                 true);
define_pd_global(bool, OptoScheduling,               false);
define_pd_global(bool, OptoBundling,                 false);
define_pd_global(intx, ReservedCodeCacheSize,        48*M);
define_pd_global(uintx, CodeCacheMinBlockLength,     4);
define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
define_pd_global(bool,  TrapBasedRangeChecks,        false); // Not needed on x86.
define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_X86_VM_C2_GLOBALS_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/c2_init_x86.cpp
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
void Compile::pd_compiler2_init() {
  guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
#ifndef AMD64
  if (!VM_Version::supports_cmov()) {
    ConditionalMoveLimit = 0;
  }
#endif // AMD64
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/codeBuffer_x86.hpp
#ifndef CPU_X86_VM_CODEBUFFER_X86_HPP
#define CPU_X86_VM_CODEBUFFER_X86_HPP
private:
  void pd_initialize() {}
public:
  void flush_bundle(bool start_new_bundle) {}
#endif // CPU_X86_VM_CODEBUFFER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/compiledIC_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}
#define __ _masm.
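// Emits the static call stub used to reach the interpreter: a mov of the
// (initially zapped) Method* into rbx followed by a jump; both are patched
// when the call is resolved.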
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
  address mark = cbuf.insts_mark();  // Get mark within main instrs section.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(to_interp_stub_size());
  if (base == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }
  __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
  __ mov_metadata(rbx, (Metadata*) NULL);  // Method is zapped till fixup time.
  __ jump(RuntimeAddress(__ pc()));  // jump target is patched when the stub is bound
  __ end_a_stub();
  return base;
}
#undef __
int CompiledStaticCall::to_interp_stub_size() {
  return NOT_LP64(10)    // movl; jmp
         LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
}
int CompiledStaticCall::reloc_to_interp_stub() {
  return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  p2i(instruction_address()),
                  callee->name_and_sig_as_C_string());
  }
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);
  set_destination_mt_safe(stub);
}
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}
#ifndef PRODUCT
void CompiledStaticCall::verify() {
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT
C:\hotspot-69087d08d473\src\cpu\x86\vm/copy_x86.hpp
#ifndef CPU_X86_VM_COPY_X86_HPP
#define CPU_X86_VM_COPY_X86_HPP
#ifdef TARGET_OS_ARCH_linux_x86
# include "copy_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "copy_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "copy_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "copy_bsd_x86.inline.hpp"
#endif
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef AMD64
  julong* to = (julong*) tohw;
  julong  v  = ((julong) value << 32) | value;
  while (count-- > 0) {
    *to++ = v;      // store one 64-bit word per iteration
  }
#else
  juint* to = (juint*)tohw;
  count *= HeapWordSize / BytesPerInt;
  while (count-- > 0) {
    *to++ = value;  // store one 32-bit word per iteration
  }
#endif // AMD64
}
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
  pd_fill_to_words(tohw, count, value);
}
static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
  (void)memset(to, value, count);
}
static void pd_zero_to_words(HeapWord* tohw, size_t count) {
  pd_fill_to_words(tohw, count, 0);
}
static void pd_zero_to_bytes(void* to, size_t count) {
  (void)memset(to, 0, count);
}
#endif // CPU_X86_VM_COPY_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/cppInterpreterGenerator_x86.hpp
#ifndef CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
#define CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
 protected:
#if 0
  address generate_asm_interpreter_entry(bool synchronized);
  address generate_native_entry(bool synchronized);
  address generate_abstract_entry(void);
  address generate_math_entry(AbstractInterpreter::MethodKind kind);
  address generate_empty_entry(void);
  address generate_accessor_entry(void);
  address generate_Reference_get_entry(void);
  void lock_method(void);
  void generate_stack_overflow_check(void);
  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
  void generate_counter_overflow(Label* do_continue);
#endif
  void generate_more_monitors();
  void generate_deopt_handling();
  address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
  void generate_compute_interpreter_state(const Register state,
                                          const Register prev_state,
                                          const Register sender_sp,
                                          bool native); // C++ interpreter only
#endif // CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/cppInterpreter_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef SHARK
#include "shark/shark_globals.hpp"
#endif
#ifdef CC_INTERP
extern "C" void RecursiveInterpreterActivation(interpreterState istate )
{
  ShouldNotReachHere();
}
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized entry
const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);
static address unctrap_frame_manager_entry  = NULL;
static address deopt_frame_manager_return_atos  = NULL;
static address deopt_frame_manager_return_btos  = NULL;
static address deopt_frame_manager_return_itos  = NULL;
static address deopt_frame_manager_return_ltos  = NULL;
static address deopt_frame_manager_return_ftos  = NULL;
static address deopt_frame_manager_return_dtos  = NULL;
static address deopt_frame_manager_return_vtos  = NULL;
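// Maps a result BasicType to the index of its result handler.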
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_VOID   : i = 5; break;
    case T_FLOAT  : i = 8; break;
    case T_LONG   : i = 9; break;
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}
bool CppInterpreter::contains(address pc)            {
    return (_code->contains(pc) ||
            pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
}
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andl(rax, 0xFFFF);      break;
    case T_BYTE   : __ sign_extend_byte (rax); break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_VOID   : // fall thru
    case T_LONG   : // fall thru
    case T_INT    : /* nothing to do */        break;
    case T_DOUBLE :
    case T_FLOAT  :
      {
        const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        if (type == T_FLOAT && UseSSE >= 1) {
#ifndef _LP64
          __ fld_d(Address(rsp, 0));
          __ fstp_s(Address(rsp, 0));
#endif // !_LP64
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          __ fld_d(Address(rsp, 0));
        }
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                            // restore return address
      }
      break;
    case T_OBJECT :
      __ movptr(rax, STATE(_oop_temp));
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}
#undef EXTEND  // SHOULD NOT BE NEEDED
address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  address entry = __ pc();
  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  __ pop(t);                            // remove return address first
  switch (type) {
    case T_VOID:
       break;
    case T_BOOLEAN:
#ifdef EXTEND
      __ c2bool(rax);
#endif
      __ push(rax);
      break;
    case T_CHAR   :
#ifdef EXTEND
      __ andl(rax, 0xFFFF);
#endif
      __ push(rax);
      break;
    case T_BYTE   :
#ifdef EXTEND
      __ sign_extend_byte (rax);
#endif
      __ push(rax);
      break;
    case T_SHORT  :
#ifdef EXTEND
      __ sign_extend_short(rax);
#endif
      __ push(rax);
      break;
    case T_LONG    :
      __ push(rdx);                             // pushes useless junk on 64bit
      __ push(rax);
      break;
    case T_INT    :
      __ push(rax);
      break;
    case T_FLOAT  :
      __ subptr(rsp, wordSize);
      if ( UseSSE < 1) {
        __ fstp_s(Address(rsp, 0));
      } else {
        __ movflt(Address(rsp, 0), xmm0);
      }
      break;
    case T_DOUBLE  :
      __ subptr(rsp, 2*wordSize);
      if ( UseSSE < 2 ) {
        __ fstp_d(Address(rsp, 0));
      } else {
        __ movdbl(Address(rsp, 0), xmm0);
      }
      break;
    case T_OBJECT :
      __ verify_oop(rax);                      // verify it
      __ push(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ jmp(t);                                   // return from result handler
  return entry;
}
address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  address entry = __ pc();
  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  switch (type) {
    case T_VOID:
      __ movptr(rax, STATE(_locals));                                   // pop parameters get new stack value
      __ addptr(rax, wordSize);                                         // account for prepush before we return
      break;
    case T_FLOAT  :
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));                                   // address for result
      __ movl(rdx, Address(rdx, wordSize));                             // get result
      __ movptr(Address(rax, 0), rdx);                                  // and store it
      break;
    case T_LONG    :
    case T_DOUBLE  :
      __ movptr(rax, STATE(_locals));                                   // address for result
      __ movptr(rcx, STATE(_stack));
      __ subptr(rax, wordSize);                                         // need addition word besides locals[0]
      __ movptr(rdx, Address(rcx, 2*wordSize));                         // get result word (junk in 64bit)
      __ movptr(Address(rax, wordSize), rdx);                           // and store it
      __ movptr(rdx, Address(rcx, wordSize));                           // get result word
      __ movptr(Address(rax, 0), rdx);                                  // and store it
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));                                   // address for result
      __ movptr(rdx, Address(rdx, wordSize));                           // get result
      __ verify_oop(rdx);                                               // verify it
      __ movptr(Address(rax, 0), rdx);                                  // and store it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}
address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_VOID:
       break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      __ movptr(rdx, STATE(_stack));                                    // get top of stack
      __ movl(rax, Address(rdx, wordSize));                             // get result word 1
      break;
    case T_LONG    :
      __ movptr(rdx, STATE(_stack));                                    // get top of stack
      __ movptr(rax, Address(rdx, wordSize));                           // get result low word
      NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));)                 // get result high word
      break;
    case T_FLOAT  :
      __ movptr(rdx, STATE(_stack));                                    // get top of stack
      if ( UseSSE >= 1) {
        __ movflt(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_s(Address(rdx, wordSize));                               // pushd float result
      }
      break;
    case T_DOUBLE  :
      __ movptr(rdx, STATE(_stack));                                    // get top of stack
      if ( UseSSE > 1) {
        __ movdbl(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_d(Address(rdx, wordSize));                               // push double result
      }
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));                                    // get top of stack
      __ movptr(rax, Address(rdx, wordSize));                           // get result word 1
      __ verify_oop(rax);                                               // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}
address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
}
address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode ( e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}
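// Allocates a BytecodeInterpreter state object on the stack and initializes
// its fields (locals, bcp, constant pool cache, method, monitor base and
// expression stack pointers) for either a normal or a native activation.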
void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register locals,
                                                                 const Register sender_sp,
                                                                 bool native) {
  const Address const_offset      (rbx, Method::const_offset());
  if (!native) {
#ifdef PRODUCT
    __ subptr(rsp, 2*wordSize);
#else /* PRODUCT */
    __ push((int32_t)NULL_WORD);
    __ push(state);                         // make it look like a real argument
#endif /* PRODUCT */
  }
  __ push(rax);
  __ enter();
  __ mov(rax, state);                                  // save current state
  __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
  __ mov(state, rsp);
  __ movptr(STATE(_locals), locals);                    // state->_locals = locals()
  __ movptr(STATE(_self_link), state);                  // point to self
  __ movptr(STATE(_prev_link), rax);                    // state->_link = state on entry (NULL or previous state)
  __ movptr(STATE(_sender_sp), sender_sp);              // state->_sender_sp = sender_sp
#ifdef _LP64
  __ movptr(STATE(_thread), r15_thread);                // state->_thread = thread
#else
  __ get_thread(rax);                                   // get vm's javathread*
  __ movptr(STATE(_thread), rax);                       // state->_thread = thread
#endif // _LP64
  __ movptr(rdx, Address(rbx, Method::const_offset())); // get ConstMethod*
  __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base
  if (native) {
    __ movptr(STATE(_bcp), (int32_t)NULL_WORD);         // state->_bcp = NULL
  } else {
    __ movptr(STATE(_bcp), rdx);                        // state->_bcp = codes()
  }
  __ xorptr(rdx, rdx);
  __ movptr(STATE(_oop_temp), rdx);                     // state->_oop_temp = NULL (only really needed for native)
  __ movptr(STATE(_mdx), rdx);                          // state->_mdx = NULL
  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ movptr(STATE(_constants), rdx);                    // state->_constants = constants()
  __ movptr(STATE(_method), rbx);                       // state->_method = method()
  __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry);   // state->_msg = initial method entry
  __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
  __ movptr(STATE(_monitor_base), rsp);                 // set monitor block bottom (grows down) this would point to entry [0]
  {
    const Address access_flags      (rbx, Method::access_flags_offset());
    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
    Label not_synced;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, not_synced);
    Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(locals, 0));                   // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
    __ subptr(rsp, entry_size);                                           // add space for a monitor entry
    __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
    __ bind(not_synced);
  }
  __ movptr(STATE(_stack_base), rsp);                                     // set expression stack base ( == &monitors[-count])
  if (native) {
    __ movptr(STATE(_stack), rsp);                                        // set current expression stack tos
    __ movptr(STATE(_stack_limit), rsp);
  } else {
    __ subptr(rsp, wordSize);                                             // pre-push stack
    __ movptr(STATE(_stack), rsp);                                        // set current expression stack tos
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
    __ negptr(rdx);                                                       // so we can subtract in next step
    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words()));
    __ movptr(STATE(_stack_limit), rsp);
  }
#ifdef _LP64
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64
}
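// Increments the invocation counter in the MethodCounters and compares the
// combined invocation+backedge count against InterpreterInvocationLimit,
// branching to *overflow when a compilation request should be made.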
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  const Address invocation_counter(rax,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());
  const Address backedge_counter  (rax,
                MethodCounters::backedge_counter_offset() +
                InvocationCounter::counter_offset());
  __ get_method_counters(rbx, rax, done);
  if (ProfileInterpreter) {
    __ incrementl(Address(rax,
            MethodCounters::interpreter_invocation_counter_offset()));
  }
  __ movl(rcx, invocation_counter);
  __ increment(rcx, InvocationCounter::count_increment);
  __ movl(invocation_counter, rcx);             // save invocation count
  __ movl(rax, backedge_counter);               // load backedge counter
  __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
  __ addl(rcx, rax);                            // add both counters
  __ cmp32(rcx,
           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
  __ jcc(Assembler::aboveEqual, *overflow);
  __ bind(done);
}
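// Calls InterpreterRuntime::frequency_counter_overflow, passing false (not at
// a backwards branch), then restores the state, method and locals registers
// and resumes at *do_continue.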
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
  __ movptr(rax, (int32_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));                               // restore state
  __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method)));            // restore method
  __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals)));            // get locals pointer
  __ jmp(*do_continue, relocInfo::none);
}
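// Conservative stack overflow check: computes the worst-case frame size
// (expression stack, monitor entry and interpreter state overhead), adds the
// guard-page and slop allowance, and compares rsp against the thread's usable
// stack limit, throwing StackOverflowError if the new frame would not fit.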
void InterpreterGenerator::generate_stack_overflow_check(void) {
  const int entry_size    = frame::interpreter_frame_monitor_size() * wordSize;
  const int overhead_size = (int)sizeof(BytecodeInterpreter);
  const int page_size = os::vm_page_size();
  Label after_frame_check;
  Label after_frame_check_pop;
  __ push(state);
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);
  NOT_LP64(__ get_thread(thread));
  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());
  const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
  __ movptr(rax, Address(rbx, Method::const_offset()));
  __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words()));
  __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  __ cmpptr(stack_base, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  __ cmpptr(stack_size, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);
  const int slop = 6 * K;
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  __ addptr(rax, slop + 2*max_pages * page_size);
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);
  __ pop(state);  //  get c++ prev state.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ bind(after_frame_check_pop);
  __ pop(state);
  __ bind(after_frame_check);
}
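// Locks the method's synchronization object: the receiver for instance
// methods, or the holder class mirror for static methods. Expects the initial
// monitor slot (filled in by generate_compute_interpreter_state) to already
// contain that object.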
void InterpreterGenerator::lock_method(void) {
  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Register monitor  = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(monitor, STATE(_monitor_base));                                   // get monitor bottom limit
  __ subptr(monitor, entry_size);                                             // point to initial monitor
#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ movptr(rdi, STATE(_locals));                                     // prepare to get receiver (assume common case)
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, 0));                                    // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
#ifdef ASSERT
  { Label L;
    __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));   // correct object?
    __ jcc(Assembler::equal, L);
    __ stop("wrong synchronization lobject");
    __ bind(L);
  }
#endif // ASSERT
  __ lock_object(monitor);
}
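// Fast path for trivial getter methods (UseFastAccessorMethods): avoids frame
// construction, decodes the resolved getfield entry from the constant pool
// cache and loads the field straight into rax, falling back to the normal
// entry via fast_accessor_slow_entry_path if a safepoint is pending, the
// receiver is NULL, or the cache entry is not a resolved _getfield.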
address InterpreterGenerator::generate_accessor_entry(void) {
  Label xreturn_path;
  if (UseFastAccessorMethods) {
    address entry_point = __ pc();
    Label slow_path;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);
    __ movptr(rax, Address(rsp, wordSize));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);
    __ movptr(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
    Label notByte, notBool, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
    Label notObj;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
    __ movptr(rax, field_address);
    __ jmp(xreturn_path);
    __ bind(notObj);
#endif // _LP64
    __ cmpl(rdx, ztos);
    __ jcc(Assembler::notEqual, notBool);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);
    __ bind(notBool);
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);
    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);
    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);
    __ bind(notChar);
#ifdef ASSERT
    Label okay;
#ifndef _LP64
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
#endif // _LP64
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    __ movl(rax, field_address);
    __ bind(xreturn_path);
    __ pop(rdi);                               // get return address
    __ mov(rsp, sender_sp_on_entry);           // set sp to sender sp
    __ jmp(rdi);
    __ bind(slow_path);
    __ jmp(fast_accessor_slow_entry_path);
    return entry_point;
  } else {
    return NULL;
  }
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    Unimplemented();
  }
#endif // INCLUDE_ALL_GCS
  return generate_accessor_entry();
}
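// Entry point for native (JNI) methods: builds the interpreter state, checks
// counters and stack, marshals arguments with the signature handler, performs
// the thread-state transitions around the native call, and handles the result
// (oop unboxing, unlocking, pending exceptions) before returning to the caller.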
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  bool inc_counter  = UseCompiler || CountCompiledCalls;
  address entry_point = __ pc();
  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
  const Register locals = rdi;
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);
  __ pop(rax);                                       // get return address
  __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
  __ mov(rcx, sender_sp_on_entry);
  __ movptr(state, (int32_t)NULL_WORD);
  generate_compute_interpreter_state(state, locals, rcx, true);
#ifdef ASSERT
  { Label L;
    __ movptr(rax, STATE(_stack_base));
#ifdef _LP64
    __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
  NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
  const Address do_not_unlock_if_synchronized(unlock_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);
  bang_stack_shadow_pages(true);
  NOT_LP64(__ movl(rax, STATE(_thread));)                       // get thread
  __ movbool(do_not_unlock_if_synchronized, false);
  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
      { Label L;
        __ movl(rax, access_flags);
        __ testl(rax, JVM_ACC_SYNCHRONIZED);
        __ jcc(Assembler::zero, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }
  __ notify_method_entry();
  const Register method = rbx;
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
  const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1
  const Address constMethod       (method, Method::const_offset());
  const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);
  __ movptr(t, constMethod);
  __ load_unsigned_short(t, size_of_parameters);
  __ shll(t, 2);
#ifdef _LP64
  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#else
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#endif // _LP64
    Label pending_exception_present;
  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
    __ movptr(method, STATE(_method));
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, pending_exception_present);
    __ verify_method_ptr(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //
  const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
  assert(InterpreterRuntime::SignatureHandlerGenerator::to  () == rsp, "adjust this code");
  __ movptr(from_ptr, STATE(_locals));  // get the from pointer
  __ call(t);
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);
  __ movptr(STATE(_result_handler), rax);
  { Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    __ movptr(STATE(_oop_temp), t);
#ifdef _LP64
    __ lea(c_rarg1, STATE(_oop_temp));
#else
    __ lea(t, STATE(_oop_temp));
    __ movptr(Address(rsp, wordSize), t);
#endif // _LP64
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //
#ifdef _LP64
  __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
#else
  __ movptr(thread, STATE(_thread));          // get thread
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);
#endif // _LP64
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //
#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
  __ call(rax);
  __ movptr(method, STATE(_method));
  NOT_LP64(__ movptr(thread, STATE(_thread));)                  // get thread
    { Label Lpush, Lskip;
      ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
      ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
      __ cmpptr(STATE(_result_handler), float_handler.addr());
      __ jcc(Assembler::equal, Lpush);
      __ cmpptr(STATE(_result_handler), double_handler.addr());
      __ jcc(Assembler::notEqual, Lskip);
      __ bind(Lpush);
      __ subptr(rsp, 2*wordSize);
      if ( UseSSE < 2 ) {
        __ fstp_d(Address(rsp, 0));
      } else {
        __ movdbl(Address(rsp, 0), xmm0);
      }
      __ bind(Lskip);
    }
  __ push(rax);
#ifndef _LP64
  __ push(rdx);
#endif // _LP64
  __ restore_cpu_control_state_after_jni();
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    __ serialize_memory(thread, rcx);
  }
  { Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);
    ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                          thread);
    __ increment(rsp, wordSize);
    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(thread, STATE(_thread));                       // get thread
    __ bind(Continue);
  }
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
  __ reset_last_Java_frame(thread, true, true);
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
  { Label L;
    Label no_oop, store_result;
      ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(STATE(_result_handler), oop_handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
#ifndef _LP64
    __ pop(rdx);
#endif // _LP64
    __ pop(rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(STATE(_oop_temp), rax);
    __ push(rax);
#ifndef _LP64
    __ push(rdx);
#endif // _LP64
    __ bind(no_oop);
  }
  {
     Label no_reguard;
     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
     __ jcc(Assembler::notEqual, no_reguard);
     __ pusha();
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
     __ popa();
     __ bind(no_reguard);
   }
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ bind(pending_exception_present);
    __ movptr(t, STATE(_sender_sp));
    __ leave();                                  // remove frame anchor
    __ pop(rdi);                                 // get return address
    __ movptr(state, STATE(_prev_link));         // get previous state for return
    __ mov(rsp, t);                              // set sp to sender sp
    __ push(rdi);                                // push throwing pc
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ bind(L);
  }
  { Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    { Label unlock;
    const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
      __ movptr(monitor, STATE(_monitor_base));
      __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize);  // address of initial monitor
      __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();
      __ bind(unlock);
      __ unlock_object(monitor);
      __ movptr(method, STATE(_method));
    }
    __ bind(L);
  }
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
#ifndef _LP64
  __ pop(rdx);
#endif // _LP64
  __ pop(rax);
  __ movptr(t, STATE(_result_handler));       // get result handler
  __ call(t);                                 // call result handler to convert to tosca form
  __ movptr(t, STATE(_sender_sp));
  __ leave();                                  // remove frame anchor
  __ pop(rdi);                                 // get return address
  __ movptr(state, STATE(_prev_link));         // get previous state for return (if c++ interpreter was caller)
  __ mov(rsp, t);                              // set sp to sender sp
  __ jmp(rdi);
  if (inc_counter) {
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }
  return entry_point;
}
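// Deopt return points: one stub per result type loads the BasicType index
// into rcx, then common code converts the tosca result back onto the
// interpreter's expression stack and marks the state for deopt_resume.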
void CppInterpreterGenerator::generate_deopt_handling() {
  Label return_from_deopt_common;
  deopt_frame_manager_return_atos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_btos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_itos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_ltos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_ftos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_dtos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE));    // Result stub address array index
  __ jmp(return_from_deopt_common);
  deopt_frame_manager_return_vtos  = __ pc();
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));
  __ bind(return_from_deopt_common);
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
  __ movptr(rsp, STATE(_stack));                                   // trim stack (is prepushed)
  __ addptr(rsp, wordSize);                                        // undo prepush
  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
  __ call(rcx);                                                   // call result converter
  __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
  __ lea(rsp, Address(rsp, -wordSize));                            // prepush stack (result if any already present)
  __ movptr(STATE(_stack), rsp);                                   // inform interpreter of new stack depth (parameters removed, result if any already on stack)
  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
}
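// Grows the monitor area by one slot: shifts the expression stack down by
// entry_size, copies its contents to the new location, clears the new
// monitor's object field and tells the interpreter it got its monitors.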
void CppInterpreterGenerator::generate_more_monitors() {
  Label entry, loop;
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  __ movptr(rdx, STATE(_stack_base));            // rdx: old expression stack bottom
  __ subptr(rsp, entry_size);                    // move expression stack top limit
  __ subptr(STATE(_stack), entry_size);          // update interpreter stack top
  __ subptr(STATE(_stack_limit), entry_size);    // inform interpreter
  __ subptr(rdx, entry_size);                    // move expression stack bottom
  __ movptr(STATE(_stack_base), rdx);            // inform interpreter
  __ movptr(rcx, STATE(_stack));                 // set start value for copy loop
  __ jmp(entry);
  __ bind(loop);
  __ movptr(rbx, Address(rcx, entry_size));      // load expression stack word from old location
  __ movptr(Address(rcx, 0), rbx);               // and store it at new location
  __ addptr(rcx, wordSize);                      // advance to next word
  __ bind(entry);
  __ cmpptr(rcx, rdx);                           // check if bottom reached
  __ jcc(Assembler::notEqual, loop);             // if not at bottom then copy next word
  __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
  __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
}
static address interpreter_frame_manager = NULL;
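// The frame manager: the single entry point that drives BytecodeInterpreter::run,
// dispatching on the message left in STATE(_msg) (call_method,
// return_from_method, more_monitors, do_osr, throwing_exception). It is
// generated once and cached in interpreter_frame_manager.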
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  if (interpreter_frame_manager) return interpreter_frame_manager;
  address entry_point = __ pc();
  if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
  Label dispatch_entry_2;
  __ movptr(rcx, sender_sp_on_entry);
  __ movptr(state, (int32_t)NULL_WORD);                              // no current activation
  __ jmp(dispatch_entry_2);
  const Register locals  = rdi;
  Label re_dispatch;
  __ bind(re_dispatch);
  __ lea(rcx, Address(rsp, wordSize));
  __ bind(dispatch_entry_2);
  __ push(rcx);
  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);
  __ load_unsigned_short(rdx, size_of_locals);                     // get size of locals in words
  __ subptr(rdx, rcx);                                             // rdx = no. of additional locals
  generate_stack_overflow_check();                                 // C++
  bang_stack_shadow_pages(false);
  __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
  __ pop(rcx);
  __ pop(rax);
  {
    Label exit, loop;
    __ testl(rdx, rdx);                               // (32bit ok)
    __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);                      // initialize local variables
    __ decrement(rdx);                                // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }
  generate_compute_interpreter_state(state, locals, rcx, false);
  Label call_interpreter;
  __ bind(call_interpreter);
  bang_stack_shadow_pages(false);
  Label call_interpreter_2;
  __ bind(call_interpreter_2);
  {
    const Register thread  = NOT_LP64(rcx) LP64_ONLY(r15_thread);
#ifdef _LP64
    __ mov(c_rarg0, state);
#else
    __ push(state);                                                 // push arg to interpreter
    __ movptr(thread, STATE(_thread));
#endif // _LP64
    __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
    __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
    RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
    RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
    __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
    NOT_LP64(__ pop(rax);)                                          // discard parameter to run
    NOT_LP64(__ movl(thread, STATE(_thread));)
    __ reset_last_Java_frame(thread, true, true);
  }
  __ movl(rdx, STATE(_msg));                                       // Get new message
  Label call_method;
  Label return_from_interpreted_method;
  Label throw_exception;
  Label bad_msg;
  Label do_OSR;
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
  __ jcc(Assembler::equal, call_method);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
  __ jcc(Assembler::equal, return_from_interpreted_method);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
  __ jcc(Assembler::equal, do_OSR);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
  __ jcc(Assembler::equal, throw_exception);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
  __ jcc(Assembler::notEqual, bad_msg);
  generate_more_monitors();
  __ jmp(call_interpreter);
  unctrap_frame_manager_entry  = __ pc();
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
  __ jmp(call_interpreter_2);
  generate_deopt_handling();
  __ jmp(call_interpreter);
  Interpreter::_rethrow_exception_entry = __ pc();
  Label return_with_exception;
  Label unwind_and_forward;
  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
  __ movptr(rbx, STATE(_method));                       // get method
#ifdef _LP64
  __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
#else
  __ movl(rcx, STATE(_thread));                       // get thread
  __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
#endif // _LP64
  __ movl(rdx, access_flags);
  __ testl(rdx, JVM_ACC_NATIVE);
  __ jcc(Assembler::zero, return_with_exception);     // vanilla interpreted frame, handle directly
  __ bind(unwind_and_forward);
  __ movptr(rcx, STATE(_sender_sp));
  __ leave();
  __ pop(rdx);
  __ mov(rsp, rcx);
  __ push(rdx);
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  Label resume_interpreter;
  Label do_float;
  Label do_double;
  Label done_conv;
  if (UseSSE < 2) {
    __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
    __ movptr(rbx, STATE(_result._to_call._callee));                   // get method just executed
    __ movl(rcx, Address(rbx, Method::result_index_offset()));
    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));    // Result stub address array index
    __ jcc(Assembler::equal, do_float);
    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE));    // Result stub address array index
    __ jcc(Assembler::equal, do_double);
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
    __ empty_FPU_stack();
#endif // !_LP64 || COMPILER1 || !COMPILER2
    __ jmp(done_conv);
    __ bind(do_float);
#ifdef COMPILER2
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
#endif // COMPILER2
    __ jmp(done_conv);
    __ bind(do_double);
#ifdef COMPILER2
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
#endif // COMPILER2
    __ jmp(done_conv);
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
    __ jmp(done_conv);
  }
  InternalAddress return_from_native_method(__ pc());
  __ bind(done_conv);
  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
  __ movptr(rsp, STATE(_stack));
  __ lea(rsp, Address(rsp, wordSize));
#ifdef _LP64
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#else
  __ movptr(rcx, STATE(_thread));                       // get thread
  __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#endif // _LP64
  __ jcc(Assembler::notZero, return_with_exception);
  __ movptr(rbx, STATE(_result._to_call._callee));
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
  __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
  __ movl(rcx, Address(rbx, Method::result_index_offset()));
  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
  __ call(rcx);                                               // call result converter
  __ jmp(resume_interpreter);
  __ bind(return_with_exception);
  __ movptr(rsp, STATE(_stack_base));
  __ jmp(resume_interpreter);
  __ bind(return_from_interpreted_method);
  Label return_to_initial_caller;
  __ movptr(rbx, STATE(_method));                                   // get method just executed
  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);                 // returning from "recursive" interpreter call?
  __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
  __ jcc(Assembler::equal, return_to_initial_caller);               // back to native code (call_stub/c1/c2)
  ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
  __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
  __ call(rax);                                                     // call result converter
  Label unwind_recursive_activation;
  __ bind(unwind_recursive_activation);
  __ movptr(state, STATE(_prev_link));                              // unwind state
  __ leave();                                                       // pop the frame
  __ mov(rsp, rax);                                                 // unwind stack to remove args
  __ bind(resume_interpreter);
  __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
  __ lea(rsp, Address(rsp, -wordSize));                            // prepush stack (result if any already present)
  __ movptr(STATE(_stack), rsp);                                   // inform interpreter of new stack depth (parameters removed, result if any already on stack)
  __ movptr(rsp, STATE(_stack_limit));                             // restore expression stack to full depth
  __ jmp(call_interpreter_2);                                      // No need to bang
  __ bind(return_to_initial_caller);
  ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
  __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
  __ call(rax);                                                    // call result converter
  Label unwind_initial_activation;
  __ bind(unwind_initial_activation);
  // Stack layout on entry to unwind_initial_activation:
  //        [ incoming parameters ]
  //        [ extra locals ]
  //        [ return address to CALL_STUB/C1/C2 ]
  //  fp -> [ CALL_STUB/C1/C2 fp ]
  //        BytecodeInterpreter object
  //        expression stack
  //  sp ->
  __ movptr(rcx, STATE(_sender_sp));
  __ leave();
  __ pop(rdi);                                                        // get return address
  __ mov(rsp, rcx);
  __ jmp(rdi);                                                        // return to call_stub
  __ bind(do_OSR);
  Label remove_initial_frame;
  __ movptr(rcx, STATE(_result._osr._osr_buf));
  __ movptr(rax, STATE(_result._osr._osr_entry));
  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);            // returning from "recursive" interpreter call?
  __ jcc(Assembler::equal, remove_initial_frame);              // back to native code (call_stub/c1/c2)
  __ movptr(sender_sp_on_entry, STATE(_sender_sp));            // get sender's sp in expected register
  __ leave();                                                  // pop the frame
  __ mov(rsp, sender_sp_on_entry);                             // trim any stack expansion
  __ pushptr(return_from_native_method.addr());
  __ jmp(rax);
  __ bind(remove_initial_frame);
  __ movptr(rdx, STATE(_sender_sp));
  __ leave();
  __ pop(rsi);
  __ mov(rsp, rdx);
  __ push(rsi);
  __ jmp(rax);
  __ bind(call_method);
  __ movptr(rsp, STATE(_stack));                                     // pop args to c++ interpreter, set sp to java stack top
  __ lea(rsp, Address(rsp, wordSize));
  __ movptr(rbx, STATE(_result._to_call._callee));                   // get method to execute
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
  ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
  __ pushptr(recursive.addr());                                      // make it look good in the debugger
  InternalAddress entry(entry_point);
  __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
  __ jcc(Assembler::equal, re_dispatch);                             // yes
  __ pop(rax);                                                       // pop dummy address
  __ movptr(rax, STATE(_result._to_call._callee_entry_point));
  __ mov(sender_sp_on_entry, rsp);
  __ pushptr(return_from_native_method.addr());
  __ jmp(rax);
  __ bind(bad_msg);
  __ stop("Bad message from interpreter");
  Label unwind_initial_with_pending_exception;
  __ bind(throw_exception);
  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);                 // returning from recursive interpreter call?
  __ jcc(Assembler::equal, unwind_initial_with_pending_exception);  // no, back to native code (call_stub/c1/c2)
  __ movptr(rax, STATE(_locals));                                   // pop parameters get new stack value
  __ addptr(rax, wordSize);                                         // account for prepush before we return
  __ jmp(unwind_recursive_activation);
  __ bind(unwind_initial_with_pending_exception);
  __ jmp(unwind_and_forward);
  interpreter_frame_manager = entry_point;
  return entry_point;
}
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  bool synchronized = false;
  address entry_point = NULL;
  switch (kind) {
    case Interpreter::zerolocals             :                                                                             break;
    case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);  break;
    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);   break;
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();        break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();     break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();     break;
    case Interpreter::method_handle          : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);     break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
    default                                  : ShouldNotReachHere();                                                       break;
  }
  if (entry_point) return entry_point;
  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : CppInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}
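// Returns a conservative size (in words) for the top interpreter activation:
// interpreter state and frame overhead plus max locals and max expression
// stack, plus a small allowance for the call stub.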
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int stub_code = 4;  // see generate_call_stub
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
    ( frame::sender_sp_offset - frame::link_offset) + 2;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return overhead_size + method_stack + stub_code;
}
static int size_activation_helper(int extra_locals_size, int monitor_size) {
  return (extra_locals_size +                  // the addition space for locals
          2*BytesPerWord +                     // return address and saved rbp
          2*BytesPerWord +                     // "static long no_params() method" issue
          sizeof(BytecodeInterpreter) +               // interpreterState
          monitor_size);                       // monitors
}
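// Fills in a BytecodeInterpreter object for a frame being rebuilt (e.g. during
// deoptimization): wires up the method, locals, expression stack and monitor
// area, and links it to the caller's interpreterState when the caller is also
// interpreted.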
void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
                                           frame* caller,
                                           frame* current,
                                           Method* method,
                                           intptr_t* locals,
                                           intptr_t* stack,
                                           intptr_t* stack_base,
                                           intptr_t* monitor_base,
                                           intptr_t* frame_bottom,
                                           bool is_top_frame
                                           )
{
  to_fill->_thread = JavaThread::current();
  to_fill->_bcp = method->code_base();
  to_fill->_locals = locals;
  to_fill->_constants = method->constants()->cache();
  to_fill->_method = method;
  to_fill->_mdx = NULL;
  to_fill->_stack = stack;
  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
    to_fill->_msg = deopt_resume2;
  } else {
    to_fill->_msg = method_resume;
  }
  to_fill->_result._to_call._bcp_advance = 0;
  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
  to_fill->_prev_link = NULL;
  to_fill->_sender_sp = caller->unextended_sp();
  if (caller->is_interpreted_frame()) {
    interpreterState prev  = caller->get_interpreterState();
    to_fill->_prev_link = prev;
    prev->_result._to_call._callee = method;
    if (*prev->_bcp == Bytecodes::_invokeinterface) {
      prev->_result._to_call._bcp_advance = 5;
    } else {
      prev->_result._to_call._bcp_advance = 3;
    }
  }
  to_fill->_oop_temp = NULL;
  to_fill->_stack_base = stack_base;
  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
  to_fill->_self_link = to_fill;
  assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
         "Stack top out of range");
}
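// Computes the frame size in bytes for an interpreter activation, returning
// the monitor area size and the full (max expression stack) frame size via
// the out parameters; non-top frames only account for the live stack words
// (tempcount).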
static int frame_size_helper(int max_stack,
                             int tempcount,
                             int moncount,
                             int callee_param_count,
                             int callee_locals,
                             bool is_top_frame,
                             int& monitor_size,
                             int& full_frame_size) {
  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
  monitor_size = sizeof(BasicObjectLock) * moncount;
  int short_frame_size = size_activation_helper(extra_locals_size,
                                                monitor_size);
  full_frame_size = short_frame_size + max_stack * BytesPerWord;
  short_frame_size = short_frame_size + tempcount * BytesPerWord;
  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
  return frame_size;
}
int AbstractInterpreter::size_activation(int max_stack,
                                         int tempcount,
                                         int extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  assert(extra_args == 0, "FIX ME");
  int unused_monitor_size = 0;
  int unused_full_frame_size = 0;
  return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
                           is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
}
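// Lays out an interpreter frame inside the skeletal frame created by the
// deoptimizer: computes the locals, monitor and expression stack addresses
// relative to the frame's fp and delegates the interpreterState
// initialization to BytecodeInterpreter::layout_interpreterState.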
void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,  //
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  assert(popframe_extra_args == 0, "FIX ME");
  int monitor_size = 0;
  int full_frame_size = 0;
  int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
                                     is_top_frame, monitor_size, full_frame_size);
#ifdef ASSERT
  assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
#endif
  intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
  intptr_t* locals;
  if (caller->is_interpreted_frame()) {
    interpreterState prev  = caller->get_interpreterState();
    locals = prev->stack() + method->size_of_parameters();
    if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
    }
  } else {
    locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  }
  intptr_t* monitor_base = (intptr_t*) cur_state;
  intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
  BytecodeInterpreter::layout_interpreterState(cur_state,
                                               caller,
                                               interpreter_frame,
                                               method,
                                               locals,
                                               stack,
                                               stack_base,
                                               monitor_base,
                                               frame_bottom,
                                               is_top_frame);
}
#endif // CC_INTERP (all)
C:\hotspot-69087d08d473\src\cpu\x86\vm/cppInterpreter_x86.hpp
#ifndef CPU_X86_VM_CPPINTERPRETER_X86_HPP
#define CPU_X86_VM_CPPINTERPRETER_X86_HPP
  protected:
  const static int InterpreterCodeSize = 168 * 1024;
#endif // CPU_X86_VM_CPPINTERPRETER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/debug_x86.cpp
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "runtime/frame.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"
void pd_ps(frame f) {}
C:\hotspot-69087d08d473\src\cpu\x86\vm/depChecker_x86.cpp
#include "precompiled.hpp"
#include "compiler/disassembler.hpp"
#include "depChecker_x86.hpp"
C:\hotspot-69087d08d473\src\cpu\x86\vm/depChecker_x86.hpp
#ifndef CPU_X86_VM_DEPCHECKER_X86_HPP
#define CPU_X86_VM_DEPCHECKER_X86_HPP
#endif // CPU_X86_VM_DEPCHECKER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/disassembler_x86.hpp
#ifndef CPU_X86_VM_DISASSEMBLER_X86_HPP
#define CPU_X86_VM_DISASSEMBLER_X86_HPP
  static int pd_instruction_alignment() {
    return 1;
  }
  static const char* pd_cpu_opts() {
    return "";
  }
#endif // CPU_X86_VM_DISASSEMBLER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/frame_x86.cpp
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
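// Profiling / stack-walking safety check: decides whether it is safe to
// construct this frame's sender, using only values that can be sanity-checked
// (sp/fp/unextended_sp bounds, code blob containment, saved fp and return pc
// plausibility), so that a corrupt frame never causes a crash.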
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;
  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);
  if (!sp_safe) {
    return false;
  }
  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
                            (unextended_sp >= sp);
  if (!unextended_sp_safe) {
    return false;
  }
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));
  if (_cb != NULL ) {
    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }
    if (!_cb->code_contains(_pc)) {
      return false;
    }
    if (is_entry_frame()) {
      if (!fp_safe) return false;
      address jcw = (address)entry_frame_call_wrapper();
      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
      return jcw_safe;
    }
    intptr_t* sender_sp = NULL;
    address   sender_pc = NULL;
    if (is_interpreted_frame()) {
      if (!fp_safe) {
        return false;
      }
      sender_pc = (address) this->fp()[return_addr_offset];
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
    } else {
      if (_cb->frame_size() <= 0) {
        return false;
      }
      sender_sp = _unextended_sp + _cb->frame_size();
      sender_pc = (address) *(sender_sp-1);
    }
    if (Interpreter::contains(sender_pc)) {
      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
      if (!saved_fp_safe) {
        return false;
      }
      frame sender(sender_sp, saved_fp, sender_pc);
      return sender.is_interpreted_frame_valid(thread);
    }
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }
    if (sender_blob->is_adapter_blob()) {
      return false;
    }
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
      if (!saved_fp_safe) {
        return false;
      }
      frame sender(sender_sp, saved_fp, sender_pc);
      address jcw = (address)sender.entry_frame_call_wrapper();
      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
      return jcw_safe;
    }
    if (sender_blob->is_nmethod()) {
        nmethod* nm = sender_blob->as_nmethod_or_null();
        if (nm != NULL) {
            if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
                nm->method()->is_method_handle_intrinsic()) {
                return false;
            }
        }
    }
    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }
    if (!sender_blob->is_nmethod()) {
        return false;
    }
    return true;
  }
  if (!fp_safe) {
    return false;
  }
  if ( (address) this->fp()[return_addr_offset] == NULL) return false;
  return true;
}
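// Patches this frame's return-address slot with a new pc (e.g. when the
// callee nmethod is deoptimized). If the new pc is a deopt stub, the original
// pc is expected to have been stashed already and the frame is marked
// is_deoptimized.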
void frame::patch_pc(Thread* thread, address pc) {
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  pc_addr, *pc_addr, pc);
  }
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
bool frame::is_interpreted_frame() const  {
  return Interpreter::contains(pc());
}
int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}
intptr_t* frame::entry_frame_argument_at(int offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &unextended_sp()[index];
}
#ifdef CC_INTERP
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return get_interpreterState()->sender_sp();
}
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return get_interpreterState()->monitor_base();
}
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  return (BasicObjectLock*) get_interpreterState()->stack_base();
}
#else // CC_INTERP
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}
BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}
BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
  return result;
}
void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}
#endif // CC_INTERP
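// The sender of an entry frame is the last Java frame recorded in the
// JavaCallWrapper's JavaFrameAnchor; if the anchor is not yet walkable the
// saved pc is captured from the stack first.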
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  if (!jfa->walkable()) {
    jfa->capture_last_Java_pc();
  }
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  assert(jfa->last_Java_pc() != NULL, "not walkable");
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
  return fr;
}
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
  frame fr;
  fr._unextended_sp = unextended_sp;
  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
}
#endif
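// On x86 the unextended sp needs no adjustment here; if the pc is an
// nmethod's deopt entry we only verify (debug builds) that the stored
// original pc really lies inside that nmethod.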
void frame::adjust_unextended_sp() {
  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
  if (sender_nm != NULL) {
    if (sender_nm->is_deopt_entry(_pc) ||
        sender_nm->is_deopt_mh_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
    }
  }
}
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
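// Interpreter frames: the sender's sp was saved by the interpreter in the
// sender-sp slot, the sender's fp is the saved link, and the return pc sits
// at return_addr_offset.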
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  intptr_t* sender_sp = this->sender_sp();
  intptr_t* unextended_sp = interpreter_frame_sender_sp();
#ifdef COMPILER2
  if (map->update_map()) {
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2
  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
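// Compiled frames: the sender sp is this frame's unextended sp plus the
// CodeBlob's fixed frame size; the return pc and the saved rbp are the two
// words immediately below the sender sp.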
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = sender_sp;
  address sender_pc = (address) *(sender_sp-1);
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);
  if (map->update_map()) {
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }
    update_map_with_saved_link(map, saved_fp_addr);
  }
  assert(sender_sp != sp(), "must have changed");
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
frame frame::sender(RegisterMap* map) const {
  map->set_include_argument_oops(false);
  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }
  return frame(sender_sp(), link(), sender_pc());
}
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
  assert(is_interpreted_frame(), "must be interpreter frame");
  Method* method = interpreter_frame_method();
  int diff = (method->max_locals() - method->size_of_parameters()) *
             Interpreter::stackElementWords;
  return _fp == (fp - diff);
}
void frame::pd_gc_epilog() {
}
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }
  Method* m = *interpreter_frame_method_addr();
  if (!m->is_valid_method()) return false;
  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }
  intptr_t  bcx    = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metaspace_object()) return false;
  address locals =  (address) *interpreter_frame_locals_addr();
  if (locals > thread->stack_base() || locals < (address) fp()) return false;
#endif // CC_INTERP
  return true;
}
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
  interpreterState istate = get_interpreterState();
#endif // CC_INTERP
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();
  intptr_t* tos_addr;
  if (method->is_native()) {
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
#ifdef AMD64
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }
  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
#ifdef CC_INTERP
        obj = istate->_oop_temp;
#else
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
#endif // CC_INTERP
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
        value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }
  return type;
}
intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}
#ifndef PRODUCT
#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
#endif
intptr_t *frame::initial_deoptimization_info() {
  return fp();
}
intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}
#ifndef PRODUCT
frame::frame(void* sp, void* fp, void* pc) {
  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
}
#endif
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  if (last_Java_sp() == NULL) return;
  if (walkable()) return;
  assert(Thread::current() == (Thread*)thread, "not current thread");
  assert(last_Java_sp() != NULL, "not called from Java code?");
  assert(last_Java_pc() == NULL, "already walkable");
  capture_last_Java_pc();
  assert(walkable(), "something went wrong");
}
void JavaFrameAnchor::capture_last_Java_pc() {
  assert(_last_Java_sp != NULL, "no last frame set");
  assert(_last_Java_pc == NULL, "already walkable");
  _last_Java_pc = (address)_last_Java_sp[-1];
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/frame_x86.hpp
#ifndef CPU_X86_VM_FRAME_X86_HPP
#define CPU_X86_VM_FRAME_X86_HPP
#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"
 public:
  enum {
    pc_return_offset                                 =  0,
    link_offset                                      =  0,
    return_addr_offset                               =  1,
    sender_sp_offset                                 =  2,
#ifndef CC_INTERP
    interpreter_frame_result_handler_offset          =  3, // for native calls only
    interpreter_frame_oop_temp_offset                =  2, // for native calls only
    interpreter_frame_sender_sp_offset               = -1,
    interpreter_frame_last_sp_offset                 = interpreter_frame_sender_sp_offset - 1,
    interpreter_frame_method_offset                  = interpreter_frame_last_sp_offset - 1,
    interpreter_frame_mdx_offset                     = interpreter_frame_method_offset - 1,
    interpreter_frame_cache_offset                   = interpreter_frame_mdx_offset - 1,
    interpreter_frame_locals_offset                  = interpreter_frame_cache_offset - 1,
    interpreter_frame_bcx_offset                     = interpreter_frame_locals_offset - 1,
    interpreter_frame_initial_sp_offset              = interpreter_frame_bcx_offset - 1,
    interpreter_frame_monitor_block_top_offset       = interpreter_frame_initial_sp_offset,
    interpreter_frame_monitor_block_bottom_offset    = interpreter_frame_initial_sp_offset,
#endif // CC_INTERP
#ifdef AMD64
#ifdef _WIN64
    entry_frame_after_call_words                     =  28,
    entry_frame_call_wrapper_offset                  =  2,
    arg_reg_save_area_bytes                          = 32, // Register argument save area
#else
    entry_frame_after_call_words                     = 13,
    entry_frame_call_wrapper_offset                  = -6,
    arg_reg_save_area_bytes                          =  0,
#endif // _WIN64
#else
    entry_frame_call_wrapper_offset                  =  2,
#endif // AMD64
    native_frame_initial_param_offset                =  2
  };
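  // Resulting layout of an (asm) interpreter frame, relative to fp
  // (word offsets, derived from the constants above):
  //   fp[ 2]  sender sp / oop temp (native calls)
  //   fp[ 1]  return address
  //   fp[ 0]  saved caller fp (link)
  //   fp[-1]  sender sp saved by the interpreter
  //   fp[-2]  last sp
  //   fp[-3]  Method*
  //   fp[-4]  mdx
  //   fp[-5]  ConstantPoolCache*
  //   fp[-6]  locals pointer
  //   fp[-7]  bcx
  //   fp[-8]  initial expression stack sp (monitor block top/bottom)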
  intptr_t ptr_at(int offset) const {
    return *ptr_at_addr(offset);
  }
  void ptr_at_put(int offset, intptr_t value) {
    *ptr_at_addr(offset) = value;
  }
 private:
  intptr_t*   _fp; // frame pointer
  intptr_t*     _unextended_sp;
  void adjust_unextended_sp();
  intptr_t* ptr_at_addr(int offset) const {
    return (intptr_t*) addr_at(offset);
  }
#ifdef ASSERT
  static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
#endif
 public:
  frame(intptr_t* sp, intptr_t* fp, address pc);
  frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);
  frame(intptr_t* sp, intptr_t* fp);
  void init(intptr_t* sp, intptr_t* fp, address pc);
  intptr_t*   fp() const { return _fp; }
  inline address* sender_pc_addr() const;
  inline address* native_param_addr(int idx) const;
  intptr_t* interpreter_frame_last_sp() const;
  static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
#ifndef CC_INTERP
  void interpreter_frame_set_last_sp(intptr_t* sp);
#endif // CC_INTERP
#ifdef CC_INTERP
  inline interpreterState get_interpreterState() const;
#endif // CC_INTERP
#endif // CPU_X86_VM_FRAME_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/frame_x86.inline.hpp
#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP
#include "code/codeCache.hpp"
inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _unextended_sp = NULL;
  _fp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
}
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
  init(sp, fp, pc);
}
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
  _sp = sp;
  _unextended_sp = unextended_sp;
  _fp = fp;
  _pc = pc;
  assert(pc != NULL, "no pc?");
  _cb = CodeCache::find_blob(pc);
  adjust_unextended_sp();
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}
inline frame::frame(intptr_t* sp, intptr_t* fp) {
  _sp = sp;
  _unextended_sp = sp;
  _fp = fp;
  _pc = (address)(sp[-1]);
  _cb = CodeCache::find_blob(_pc);
  adjust_unextended_sp();
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    _pc = original_pc;
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}
inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
              && unextended_sp() == other.unextended_sp()
              && fp() == other.fp()
              && pc() == other.pc();
  assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
  return ret;
}
inline intptr_t* frame::id(void) const { return unextended_sp(); }
inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() < id ; }
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }
inline intptr_t* frame::link() const              { return (intptr_t*) *(intptr_t **)addr_at(link_offset); }
inline void      frame::set_link(intptr_t* addr)  { *(intptr_t **)addr_at(link_offset) = addr; }
inline intptr_t* frame::unextended_sp() const     { return _unextended_sp; }
inline address* frame::sender_pc_addr()      const { return (address*) addr_at( return_addr_offset); }
inline address  frame::sender_pc()           const { return *sender_pc_addr(); }
inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); }
#ifdef CC_INTERP
inline interpreterState frame::get_interpreterState() const {
  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
}
inline intptr_t*    frame::sender_sp()        const {
  if (is_interpreted_frame()) {
    assert(false, "should never happen");
    return get_interpreterState()->sender_sp();
  } else {
    return            addr_at(sender_sp_offset);
  }
}
inline intptr_t** frame::interpreter_frame_locals_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_locals);
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_bcp);
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_constants);
}
inline Method** frame::interpreter_frame_method_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return &(get_interpreterState()->_method);
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  assert(is_interpreted_frame(), "must be interpreted");
  return (intptr_t*) &(get_interpreterState()->_mdx);
}
inline intptr_t* frame::interpreter_frame_tos_address() const {
  assert(is_interpreted_frame(), "wrong frame type");
  return get_interpreterState()->_stack + 1;
}
#else /* asm interpreter */
inline intptr_t*    frame::sender_sp()        const { return            addr_at(   sender_sp_offset); }
inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**)addr_at(interpreter_frame_locals_offset);
}
inline intptr_t* frame::interpreter_frame_last_sp() const {
  return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset);
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_bcx_offset);
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  return (intptr_t*)addr_at(interpreter_frame_mdx_offset);
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
  return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset);
}
inline Method** frame::interpreter_frame_method_addr() const {
  return (Method**)addr_at(interpreter_frame_method_offset);
}
inline intptr_t* frame::interpreter_frame_tos_address() const {
  intptr_t* last_sp = interpreter_frame_last_sp();
  if (last_sp == NULL) {
    return sp();
  } else {
    assert(last_sp <= (intptr_t*) interpreter_frame_monitor_end(), "bad tos");
    return last_sp;
  }
}
inline oop* frame::interpreter_frame_temp_oop_addr() const {
  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}
#endif /* CC_INTERP */
inline int frame::pd_oop_map_offset_adjustment() const {
  return 0;
}
inline int frame::interpreter_frame_monitor_size() {
  return BasicObjectLock::size();
}
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end();
  return monitor_end-1;
}
inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
 return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - local_index + (local_index < nof_args ? 1: -1));
}
inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
}
inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
  return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
}
inline bool frame::volatile_across_calls(Register reg) {
  return true;
}
inline oop frame::saved_oop_result(RegisterMap* map) const {
  oop* result_adr = (oop *)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  return (*result_adr);
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  oop* result_adr = (oop *)map->location(rax->as_VMReg());
  guarantee(result_adr != NULL, "bad register save location");
  *result_adr = obj;
}
#endif // CPU_X86_VM_FRAME_X86_INLINE_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/globalDefinitions_x86.hpp
#ifndef CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
#define CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
const int StackAlignmentInBytes  = 16;
const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORTS_NATIVE_CX8
#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/globals_x86.hpp
#ifndef CPU_X86_VM_GLOBALS_X86_HPP
#define CPU_X86_VM_GLOBALS_X86_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
define_pd_global(bool, ConvertSleepToYield,      true);
define_pd_global(bool, CountInterpCalls,         true);
define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks,      false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast
#ifdef COMPILER2
define_pd_global(intx, CodeEntryAlignment,       32);
#else
define_pd_global(intx, CodeEntryAlignment,       16);
#endif // COMPILER2
define_pd_global(intx, OptoLoopAlignment,        16);
define_pd_global(intx, InlineFrequencyCount,     100);
define_pd_global(intx, InlineSmallCode,          1000);
define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
#else
define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
#endif // AMD64
define_pd_global(intx, PreInflateSpin,           10);
define_pd_global(bool, RewriteBytecodes,     true);
define_pd_global(bool, RewriteFrequentPairs, true);
#ifdef _ALLBSD_SOURCE
define_pd_global(bool, UseMembar,            true);
#else
define_pd_global(bool, UseMembar,            false);
#endif
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, PreserveFramePointer, false);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                            \
  develop(bool, IEEEPrecision, true,                                        \
          "Enables IEEE precision (for INTEL only)")                        \
                                                                            \
  product(intx, FenceInstruction, 0,                                        \
          "(Unsafe,Unstable) Experimental")                                 \
                                                                            \
  product(intx,  ReadPrefetchInstr, 0,                                      \
          "Prefetch instruction to prefetch ahead")                         \
                                                                            \
  product(bool, UseStoreImmI16, true,                                       \
          "Use store immediate 16-bits value instruction on x86")           \
                                                                            \
  product(intx, UseAVX, 99,                                                 \
          "Highest supported AVX instructions set on x86/x64")              \
                                                                            \
  product(bool, UseCLMUL, false,                                            \
          "Control whether CLMUL instructions can be used on x86/x64")      \
                                                                            \
  diagnostic(bool, UseIncDec, true,                                         \
          "Use INC, DEC instructions on x86")                               \
                                                                            \
  product(bool, UseNewLongLShift, false,                                    \
          "Use optimized bitwise shift left")                               \
                                                                            \
  product(bool, UseAddressNop, false,                                       \
          "Use '0F 1F [addr]' NOP instructions on x86 cpus")                \
                                                                            \
  product(bool, UseXmmLoadAndClearUpper, true,                              \
          "Load low part of XMM register and clear upper part")             \
                                                                            \
  product(bool, UseXmmRegToRegMoveAll, false,                               \
          "Copy all XMM register bits when moving value between registers") \
                                                                            \
  product(bool, UseXmmI2D, false,                                           \
          "Use SSE2 CVTDQ2PD instruction to convert Integer to Double")     \
                                                                            \
  product(bool, UseXmmI2F, false,                                           \
          "Use SSE2 CVTDQ2PS instruction to convert Integer to Float")      \
                                                                            \
  product(bool, UseUnalignedLoadStores, false,                              \
          "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                            \
  product(bool, UseFastStosb, false,                                        \
          "Use fast-string operation for zeroing: rep stosb")               \
                                                                            \
  product(bool, UseRTMLocking, false,                                       \
          "Enable RTM lock eliding for inflated locks in compiled code")    \
                                                                            \
  experimental(bool, UseRTMForStackLocks, false,                            \
          "Enable RTM lock eliding for stack locks in compiled code")       \
                                                                            \
  product(bool, UseRTMDeopt, false,                                         \
          "Perform deopt and recompilation based on RTM abort ratio")       \
                                                                            \
  product(uintx, RTMRetryCount, 5,                                          \
          "Number of RTM retries on lock abort or busy")                    \
                                                                            \
  experimental(intx, RTMSpinLoopCount, 100,                                 \
          "Spin count for lock to become free before RTM retry")            \
                                                                            \
  experimental(intx, RTMAbortThreshold, 1000,                               \
          "Calculate abort ratio after this number of aborts")              \
                                                                            \
  experimental(intx, RTMLockingThreshold, 10000,                            \
          "Lock count at which to do RTM lock eliding without "             \
          "abort ratio calculation")                                        \
                                                                            \
  experimental(intx, RTMAbortRatio, 50,                                     \
          "Lock abort ratio at which to stop use RTM lock eliding")         \
                                                                            \
  experimental(intx, RTMTotalCountIncrRate, 64,                             \
          "Increment total RTM attempted lock count once every n times")    \
                                                                            \
  experimental(intx, RTMLockingCalculationDelay, 0,                         \
          "Number of milliseconds to wait before start calculating aborts " \
          "for RTM locking")                                                \
                                                                            \
  experimental(bool, UseRTMXendForLockBusy, true,                           \
          "Use RTM Xend instead of Xabort when lock busy")                  \
                                                                            \
  product(bool, Use486InstrsOnly, false,                                    \
          "Use 80486 Compliant instruction subset")                         \
                                                                            \
  product(bool, UseCountLeadingZerosInstruction, false,                     \
          "Use count leading zeros instruction")                            \
                                                                            \
  product(bool, UseCountTrailingZerosInstruction, false,                    \
          "Use count trailing zeros instruction")                           \
                                                                            \
  product(bool, UseBMI1Instructions, false,                                 \
          "Use BMI1 instructions")                                          \
                                                                            \
  product(bool, UseBMI2Instructions, false,                                 \
          "Use BMI2 instructions")
#endif // CPU_X86_VM_GLOBALS_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/icache_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/icache.hpp"
#define __ _masm->
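// The flush stub: on AMD64 it CLFLUSHes each cache line of the requested
// range (bracketed by MFENCEs); on 32-bit a locked add to the top of stack
// serves as the serializing operation. The magic value is returned in rax so
// the caller can verify the stub really ran.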
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
  StubCodeMark mark(this, "ICache", "flush_icache_stub");
  address start = __ pc();
#ifdef AMD64
  const Register addr  = c_rarg0;
  const Register lines = c_rarg1;
  const Register magic = c_rarg2;
  Label flush_line, done;
  __ testl(lines, lines);
  __ jcc(Assembler::zero, done);
  __ mfence();
  __ bind(flush_line);
  __ clflush(Address(addr, 0));
  __ addptr(addr, ICache::line_size);
  __ decrementl(lines);
  __ jcc(Assembler::notZero, flush_line);
  __ mfence();
  __ bind(done);
#else
  const Address magic(rsp, 3*wordSize);
  __ lock(); __ addl(Address(rsp, 0), 0);
#endif // AMD64
  __ movptr(rax, magic); // Handshake with caller to make sure it happened!
  __ ret(0);
}
#undef __
C:\hotspot-69087d08d473\src\cpu\x86\vm/icache_x86.hpp
#ifndef CPU_X86_VM_ICACHE_X86_HPP
#define CPU_X86_VM_ICACHE_X86_HPP
class ICache : public AbstractICache {
 public:
#ifdef AMD64
  enum {
    stub_size      = 64, // Size of the icache flush stub in bytes
    line_size      = 64, // Icache line size in bytes
    log2_line_size = 6   // log2(line_size)
  };
#else
  enum {
    stub_size      = 16,                 // Size of the icache flush stub in bytes
    line_size      = BytesPerWord,      // conservative
    log2_line_size = LogBytesPerWord    // log2(line_size)
  };
#endif // AMD64
};
#endif // CPU_X86_VM_ICACHE_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/icBuffer_x86.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
  return NativeMovConstReg::instruction_size +
         NativeJump::instruction_size +
         1;
}
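// An inline-cache stub is a constant load of the cached value into rax
// followed by a jump to the entry point (plus padding); the two parsers
// below recover the entry point and the cached value from a stub that was
// previously assembled.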
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer      code(code_begin, ic_stub_code_size());
  MacroAssembler* masm            = new MacroAssembler(&code);
  masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
  masm->jump(ExternalAddress(entry_point));
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  void* o = (void*)move->data();
  return o;
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreterGenerator_x86.hpp
#ifndef CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
#define CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
  friend class AbstractInterpreterGenerator;
 private:
  address generate_normal_entry(bool synchronized);
  address generate_native_entry(bool synchronized);
  address generate_abstract_entry(void);
  address generate_math_entry(AbstractInterpreter::MethodKind kind);
  address generate_empty_entry(void);
  address generate_accessor_entry(void);
  address generate_Reference_get_entry();
  address generate_CRC32_update_entry();
  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
  void lock_method(void);
  void generate_stack_overflow_check(void);
  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
  void generate_counter_overflow(Label* do_continue);
#endif // CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreterRT_x86.hpp
#ifndef CPU_X86_VM_INTERPRETERRT_X86_HPP
#define CPU_X86_VM_INTERPRETERRT_X86_HPP
#include "memory/allocation.hpp"
class SignatureHandlerGenerator: public NativeSignatureIterator {
 private:
  MacroAssembler* _masm;
#ifdef AMD64
#ifdef _WIN64
  unsigned int _num_args;
#else
  unsigned int _num_fp_args;
  unsigned int _num_int_args;
#endif // _WIN64
  int _stack_offset;
#else
  void move(int from_offset, int to_offset);
  void box(int from_offset, int to_offset);
#endif // AMD64
  void pass_int();
  void pass_long();
  void pass_float();
#ifdef AMD64
  void pass_double();
#endif // AMD64
  void pass_object();
 public:
  SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
    _masm = new MacroAssembler(buffer);
#ifdef AMD64
#ifdef _WIN64
    _num_args = (method->is_static() ? 1 : 0);
    _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address
#else
    _num_int_args = (method->is_static() ? 1 : 0);
    _num_fp_args = 0;
    _stack_offset = wordSize; // don't overwrite return address
#endif // _WIN64
#endif // AMD64
  }
  void generate(uint64_t fingerprint);
  static Register from();
  static Register to();
  static Register temp();
};
#endif // CPU_X86_VM_INTERPRETERRT_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreterRT_x86_32.cpp
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
  move(offset(), jni_offset() + 1);
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
  move(offset(), jni_offset() + 1);
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
   move(offset(), jni_offset() + 2);
   move(offset() + 1, jni_offset() + 1);
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
  box (offset(), jni_offset() + 1);
}
void InterpreterRuntime::SignatureHandlerGenerator::move(int from_offset, int to_offset) {
  __ movl(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset)));
  __ movl(Address(to(), to_offset * wordSize), temp());
}
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
  __ lea(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset)));
  __ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI
  Label L;
  __ jcc(Assembler::notZero, L);
  __ movptr(temp(), NULL_WORD);
  __ bind(L);
  __ movptr(Address(to(), to_offset * wordSize), temp());
}
void InterpreterRuntime::SignatureHandlerGenerator::generate( uint64_t fingerprint) {
  iterate(fingerprint);
  __ lea(rax,
         ExternalAddress((address)Interpreter::result_handler(method()->result_type())));
  __ ret(0);
  __ flush();
}
Register InterpreterRuntime::SignatureHandlerGenerator::from()       { return rdi; }
Register InterpreterRuntime::SignatureHandlerGenerator::to()         { return rsp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp()       { return rcx; }
void SignatureHandlerLibrary::pd_set_handler(address handler) {}
class SlowSignatureHandler: public NativeSignatureIterator {
 private:
  address   _from;
  intptr_t* _to;
  virtual void pass_int() {
    *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
  }
  virtual void pass_float() {
    *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
  }
  virtual void pass_long() {
    _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
    _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
    _to += 2;
    _from -= 2*Interpreter::stackElementSize;
  }
  virtual void pass_object() {
    // pass the address of the local; a zero oop is passed as NULL
    intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
    *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
    _from -= Interpreter::stackElementSize;
  }
 public:
  SlowSignatureHandler(methodHandle method, address from, intptr_t* to) :
    NativeSignatureIterator(method) {
    _from = from;
    _to   = to + (is_static() ? 2 : 1);
  }
};
IRT_ENTRY(address, InterpreterRuntime::slow_signature_handler(JavaThread* thread, Method* method, intptr_t* from, intptr_t* to))
  methodHandle m(thread, (Method*)method);
  assert(m->is_native(), "sanity check");
  SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
  return Interpreter::result_handler(m->result_type());
IRT_END
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreterRT_x86_64.cpp
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
Register InterpreterRuntime::SignatureHandlerGenerator::from() { return r14; }
Register InterpreterRuntime::SignatureHandlerGenerator::to()   { return rsp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
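// The generated handler copies Java locals (addressed off r14) into the
// native C calling convention: the first few int/fp arguments go into
// c_rarg*/xmm registers, the rest spill to the outgoing stack area at rsp.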
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
#ifdef _WIN64
  switch (_num_args) {
  case 0:
    __ movl(c_rarg1, src);
    _num_args++;
    break;
  case 1:
    __ movl(c_rarg2, src);
    _num_args++;
    break;
  case 2:
    __ movl(c_rarg3, src);
    _num_args++;
    break;
  default:
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#else
  switch (_num_int_args) {
  case 0:
    __ movl(c_rarg1, src);
    _num_int_args++;
    break;
  case 1:
    __ movl(c_rarg2, src);
    _num_int_args++;
    break;
  case 2:
    __ movl(c_rarg3, src);
    _num_int_args++;
    break;
  case 3:
    __ movl(c_rarg4, src);
    _num_int_args++;
    break;
  case 4:
    __ movl(c_rarg5, src);
    _num_int_args++;
    break;
  default:
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
#ifdef _WIN64
  switch (_num_args) {
  case 0:
    __ movptr(c_rarg1, src);
    _num_args++;
    break;
  case 1:
    __ movptr(c_rarg2, src);
    _num_args++;
    break;
  case 2:
    __ movptr(c_rarg3, src);
    _num_args++;
    break;
  case 3:
  default:
    __ movptr(rax, src);
    __ movptr(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#else
  switch (_num_int_args) {
  case 0:
    __ movptr(c_rarg1, src);
    _num_int_args++;
    break;
  case 1:
    __ movptr(c_rarg2, src);
    _num_int_args++;
    break;
  case 2:
    __ movptr(c_rarg3, src);
    _num_int_args++;
    break;
  case 3:
    __ movptr(c_rarg4, src);
    _num_int_args++;
    break;
  case 4:
    __ movptr(c_rarg5, src);
    _num_int_args++;
    break;
  default:
    __ movptr(rax, src);
    __ movptr(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
    break;
  }
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
#ifdef _WIN64
  if (_num_args < Argument::n_float_register_parameters_c-1) {
    __ movflt(as_XMMRegister(++_num_args), src);
  } else {
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#else
  if (_num_fp_args < Argument::n_float_register_parameters_c) {
    __ movflt(as_XMMRegister(_num_fp_args++), src);
  } else {
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
#ifdef _WIN64
  if (_num_args < Argument::n_float_register_parameters_c-1) {
    __ movdbl(as_XMMRegister(++_num_args), src);
  } else {
    __ movptr(rax, src);
    __ movptr(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#else
  if (_num_fp_args < Argument::n_float_register_parameters_c) {
    __ movdbl(as_XMMRegister(_num_fp_args++), src);
  } else {
    __ movptr(rax, src);
    __ movptr(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
#ifdef _WIN64
  switch (_num_args) {
  case 0:
    assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
    __ lea(c_rarg1, src);
    _num_args++;
    break;
  case 1:
    __ lea(rax, src);
    __ xorl(c_rarg2, c_rarg2);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg2, rax);
    _num_args++;
    break;
  case 2:
    __ lea(rax, src);
    __ xorl(c_rarg3, c_rarg3);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg3, rax);
    _num_args++;
    break;
  default:
    __ lea(rax, src);
    __ xorl(temp(), temp());
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, temp(), rax);
    __ movptr(Address(to(), _stack_offset), temp());
    _stack_offset += wordSize;
    break;
  }
#else
  switch (_num_int_args) {
  case 0:
    assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
    __ lea(c_rarg1, src);
    _num_int_args++;
    break;
  case 1:
    __ lea(rax, src);
    __ xorl(c_rarg2, c_rarg2);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg2, rax);
    _num_int_args++;
    break;
  case 2:
    __ lea(rax, src);
    __ xorl(c_rarg3, c_rarg3);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg3, rax);
    _num_int_args++;
    break;
  case 3:
    __ lea(rax, src);
    __ xorl(c_rarg4, c_rarg4);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg4, rax);
    _num_int_args++;
    break;
  case 4:
    __ lea(rax, src);
    __ xorl(c_rarg5, c_rarg5);
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, c_rarg5, rax);
    _num_int_args++;
    break;
  default:
    __ lea(rax, src);
    __ xorl(temp(), temp());
    __ cmpptr(src, 0);
    __ cmov(Assembler::notEqual, temp(), rax);
    __ movptr(Address(to(), _stack_offset), temp());
    _stack_offset += wordSize;
    break;
  }
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  iterate(fingerprint);
  __ lea(rax, ExternalAddress(Interpreter::result_handler(method()->result_type())));
  __ ret(0);
  __ flush();
}
void SignatureHandlerLibrary::pd_set_handler(address handler) {}
#ifdef _WIN64
class SlowSignatureHandler
  : public NativeSignatureIterator {
 private:
  address   _from;
  intptr_t* _to;
  intptr_t* _reg_args;
  intptr_t* _fp_identifiers;
  unsigned int _num_args;
  virtual void pass_int()
  {
    jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_args < Argument::n_int_register_parameters_c-1) {
      *_reg_args++ = from_obj;
      _num_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_long()
  {
    intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
    _from -= 2*Interpreter::stackElementSize;
    if (_num_args < Argument::n_int_register_parameters_c-1) {
      *_reg_args++ = from_obj;
      _num_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_object()
  {
    intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_args < Argument::n_int_register_parameters_c-1) {
      *_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
      _num_args++;
    } else {
      *_to++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
    }
  }
  virtual void pass_float()
  {
    jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_args < Argument::n_float_register_parameters_c-1) {
      assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
      *_reg_args++ = from_obj;
      *_fp_identifiers |= (intptr_t)(0x01 << (_num_args*2)); // mark as float
      _num_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_double()
  {
    intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
    _from -= 2*Interpreter::stackElementSize;
    if (_num_args < Argument::n_float_register_parameters_c-1) {
      assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
      *_reg_args++ = from_obj;
      *_fp_identifiers |= ((intptr_t)0x3 << (_num_args*2)); // mark as double
      _num_args++;
    } else {
      *_to++ = from_obj;
    }
  }
 public:
  SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
    : NativeSignatureIterator(method)
  {
    _from = from;
    _to   = to;
    _reg_args = to - (method->is_static() ? 4 : 5);
    _fp_identifiers = to - 2;
    _to = _to + 4;  // Windows reserves stack space for register arguments
    _num_args = (method->is_static() ? 1 : 0);
  }
};
#else
class SlowSignatureHandler
  : public NativeSignatureIterator {
 private:
  address   _from;
  intptr_t* _to;
  intptr_t* _int_args;
  intptr_t* _fp_args;
  intptr_t* _fp_identifiers;
  unsigned int _num_int_args;
  unsigned int _num_fp_args;
  virtual void pass_int()
  {
    jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_int_args < Argument::n_int_register_parameters_c-1) {
      *_int_args++ = from_obj;
      _num_int_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_long()
  {
    intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
    _from -= 2*Interpreter::stackElementSize;
    if (_num_int_args < Argument::n_int_register_parameters_c-1) {
      *_int_args++ = from_obj;
      _num_int_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_object()
  {
    intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_int_args < Argument::n_int_register_parameters_c-1) {
      *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
      _num_int_args++;
    } else {
      *_to++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
    }
  }
  virtual void pass_float()
  {
    jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
    _from -= Interpreter::stackElementSize;
    if (_num_fp_args < Argument::n_float_register_parameters_c) {
      *_fp_args++ = from_obj;
      _num_fp_args++;
    } else {
      *_to++ = from_obj;
    }
  }
  virtual void pass_double()
  {
    intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
    _from -= 2*Interpreter::stackElementSize;
    if (_num_fp_args < Argument::n_float_register_parameters_c) {
      *_fp_args++ = from_obj;
      *_fp_identifiers |= (1 << _num_fp_args); // mark as double
      _num_fp_args++;
    } else {
      *_to++ = from_obj;
    }
  }
 public:
  SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
    : NativeSignatureIterator(method)
  {
    _from = from;
    _to   = to;
    _int_args = to - (method->is_static() ? 14 : 15);
    _fp_args =  to - 9;
    _fp_identifiers = to - 10;
    _num_int_args = (method->is_static() ? 1 : 0);
    _num_fp_args = 0;
  }
};
#endif
IRT_ENTRY(address,
          InterpreterRuntime::slow_signature_handler(JavaThread* thread,
                                                     Method* method,
                                                     intptr_t* from,
                                                     intptr_t* to))
  methodHandle m(thread, (Method*)method);
  assert(m->is_native(), "sanity check");
  SlowSignatureHandler(m, (address)from, to + 1).iterate(UCONST64(-1));
  return Interpreter::result_handler(m->result_type());
IRT_END
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreter_x86.hpp
#ifndef CPU_X86_VM_INTERPRETER_X86_HPP
#define CPU_X86_VM_INTERPRETER_X86_HPP
 public:
  static Address::ScaleFactor stackElementScale() {
    return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
  }
  static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
  static int expr_index_at(int i)        { return stackElementWords * i; }
  static int local_index_at(int i) {
    assert(i <= 0, "local direction already negated");
    return stackElementWords * i;
  }
#endif // CPU_X86_VM_INTERPRETER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreter_x86_32.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  __ mov(rcx, rsp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
  __ ret(0);
  return entry;
}
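// Fast path for empty methods: if no safepoint is pending, pop the return
// address, restore the caller's sp from rsi and return without ever building
// an interpreter frame; otherwise fall through to the normal entry.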
address InterpreterGenerator::generate_empty_entry(void) {
  if (!UseFastEmptyMethods) return NULL;
  address entry_point = __ pc();
  Label slow_path;
  ExternalAddress state(SafepointSynchronize::address_of_state());
  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
           SafepointSynchronize::_not_synchronized);
  __ jcc(Assembler::notEqual, slow_path);
  __ pop(rax);
  __ mov(rsp, rsi);
  __ jmp(rax);
  __ bind(slow_path);
  (void) generate_normal_entry(false);
  return entry_point;
}
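// Math intrinsic entries evaluate sin/cos/tan/sqrt/abs/log/log10/pow/exp on
// the x87 stack straight from the caller's outgoing arguments; with SSE2 the
// x87 result is spilled and reloaded into xmm0 before returning to the
// caller's sp (kept in rsi).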
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
  address entry_point = __ pc();
  __ fld_d(Address(rsp, 1*wordSize));
  switch (kind) {
    case Interpreter::java_lang_math_sin :
        __ trigfunc('s');
        break;
    case Interpreter::java_lang_math_cos :
        __ trigfunc('c');
        break;
    case Interpreter::java_lang_math_tan :
        __ trigfunc('t');
        break;
    case Interpreter::java_lang_math_sqrt:
        __ fsqrt();
        break;
    case Interpreter::java_lang_math_abs:
        __ fabs();
        break;
    case Interpreter::java_lang_math_log:
        __ flog();
        __ push_fTOS();
        __ pop_fTOS();
        break;
    case Interpreter::java_lang_math_log10:
        __ flog10();
        __ push_fTOS();
        __ pop_fTOS();
        break;
    case Interpreter::java_lang_math_pow:
      __ fld_d(Address(rsp, 3*wordSize)); // second argument
      __ pow_with_fallback(0);
      __ push_fTOS();
      __ pop_fTOS();
      break;
    case Interpreter::java_lang_math_exp:
      __ exp_with_fallback(0);
      __ push_fTOS();
      __ pop_fTOS();
      break;
    default                              :
        ShouldNotReachHere();
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 2*wordSize);
    __ fstp_d(Address(rsp, 0));
    __ movdbl(xmm0, Address(rsp, 0));
    __ addptr(rsp, 2*wordSize);
  }
  __ pop(rdi);                               // get return address
  __ mov(rsp, rsi);                          // set sp to sender sp
  __ jmp(rdi);
  return entry_point;
}
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  __ should_not_reach_here();
  return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
  assert(f->is_interpreted_frame(), "must be interpreted");
}
C:\hotspot-69087d08d473\src\cpu\x86\vm/interpreter_x86_64.cpp
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#define __ _masm->
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
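// The 64-bit slow signature handler calls InterpreterRuntime::slow_signature_handler
// to lay out the outgoing C arguments on the stack, then reloads the integer
// argument registers and, using the float/double identifier bits it wrote,
// the XMM argument registers.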
#ifdef _WIN64
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  __ mov(c_rarg3, rsp);
  __ subptr(rsp, 4 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);
  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers
  for ( int i= 0; i < Argument::n_int_register_parameters_c-1; i++ ) {
    XMMRegister floatreg = as_XMMRegister(i+1);
    Label isfloatordouble, isdouble, next;
    __ testl(c_rarg3, 1 << (i*2));      // Float or Double?
    __ jcc(Assembler::notZero, isfloatordouble);
    switch ( i ) {
      case 0:
        __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
        __ testl(rscratch1, JVM_ACC_STATIC);
        __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
        break;
      case 1:
        __ movptr(c_rarg2, Address(rsp, wordSize));
        break;
      case 2:
        __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
        break;
      default:
        break;
    }
    __ jmp (next);
    __ bind(isfloatordouble);
    __ testl(c_rarg3, 1 << ((i*2)+1));     // Double?
    __ jcc(Assembler::notZero, isdouble);
    __ movflt(floatreg, Address(rsp, i * wordSize));
    __ jmp(next);
    __ bind(isdouble);
    __ movdbl(floatreg, Address(rsp, i * wordSize));
    __ bind(next);
  }
  __ addptr(rsp, 4 * wordSize);
  __ ret(0);
  return entry;
}
#else
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  __ mov(c_rarg3, rsp);
  __ subptr(rsp, 14 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);
  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers
  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const XMMRegister r = as_XMMRegister(i);
    Label d, done;
    __ testl(c_rarg3, 1 << i);
    __ jcc(Assembler::notZero, d);
    __ movflt(r, Address(rsp, (6 + i) * wordSize));
    __ jmp(done);
    __ bind(d);
    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
    __ bind(done);
  }
  __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
  __ testl(c_rarg3, JVM_ACC_STATIC);
  __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
  __ movptr(c_rarg2, Address(rsp, wordSize));
  __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
  __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
  __ movptr(c_rarg5, Address(rsp, 4 * wordSize));
  __ addptr(rsp, 14 * wordSize);
  __ ret(0);
  return entry;
}
#endif
