
C:\hotspot-69087d08d473\src\cpu\x86\vm\assembler_x86.cpp
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
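// Map a relocation type to the matching relocation spec so that the literal
// address can still be found and patched after the generated code moves.
// oop_type, metadata_type and none deliberately leave _rspec empty.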
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
#ifdef _LP64
Address Address::make_array(ArrayAddress adr) {
  ShouldNotReachHere();
  return Address();
}
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}
#endif // _LP64
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}
int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
        emit_int32(data);
  else  emit_data(data, Relocation::spec_simple(rtype), format);
}
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
    #ifdef ASSERT
      check_relocation(rspec, format);
    #endif
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}
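// Strip the REX extension bit from a register encoding: r8-r15 encode as
// 0-7 in the ModRM/SIB byte, with the extra bit carried by the separately
// emitted REX prefix.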
static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int8(imm8);
}
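// Register/immediate arithmetic. Bit 0 of op1 selects the 32-bit form and
// bit 1 selects the sign-extended imm8 form, so 0x81 /r imm32 shrinks to
// 0x83 /r imm8 when the immediate fits in a byte.
// For example, addl(rbx, 5) assembles to 83 C3 05 instead of the
// six-byte imm32 form.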
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_int8(op2 | encode(dst));
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_int8(op2 | encode(dst));
    emit_int32(imm32);
  }
}
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int8(op1);
  emit_int8(op2 | encode(dst) << 3 | encode(src));
}
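// Emit the ModRM byte (mod in bits 7:6, reg in 5:3, rm in 2:0) and, when
// needed, a SIB byte and displacement for a memory operand. rm == 100
// selects a SIB byte, which is why rsp/r12 bases always take the SIB forms
// below; mod == 00 with rbp/r13 as base would mean "disp32, no base", which
// is why those bases always carry at least a disp8. In 64-bit mode
// mod == 00, rm == 101 is a RIP-relative disp32 (see the rtype branch).
// For example, movl(rax, Address(rbx, rcx, Address::times_4, 8))
// assembles to 8B 44 8B 08 (opcode, ModRM, SIB, disp8).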
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;
  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      if (disp == 0 && rtype == relocInfo::none) {
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        emit_int8(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      emit_int8(0x05 | regenc);
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);
    } else {
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
#define end_pc_operand (_WhichOperand_limit)
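// Walk an already-emitted instruction starting at inst, skipping prefixes
// and decoding the ModRM/SIB bytes, and return the address of the requested
// 32-bit operand (displacement, immediate or call offset). When which is
// end_pc_operand the address just past the instruction is returned, which is
// what locate_next_instruction() relies on; check_relocation() uses the
// other modes to verify that relocations point at a patchable operand.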
address Assembler::locate_operand(address inst, WhichOperand which) {
  address ip = inst;
  bool is_64bit = false;
  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
  again_after_prefix:
  switch (0xFF & *ip++) {
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)
  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;
  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;
  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;
  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;
  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand
  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;
  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
#endif // _LP64
    return ip;
  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;
  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;
    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;
    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      break;
    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;
    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;
    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;
  case 0x81: // addl a, #32; addl r, #32
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;
  case 0x83: // addl a, #8; addl r, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;
  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;
  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;
  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;
  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;
  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;
  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;
  case 0xF0:                    // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;
  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;
  default:
    ShouldNotReachHere();
#undef REP8
#undef REP16
  }
  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  switch (op2 >> 6) {
  case 0:
    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;
  case 1:
    ip += 1;                    // skip the disp8
    break;
  case 2:
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;
  case 3:
    break;
  }
  if (which == end_pc_operand) {
    return ip + tail_size;
  }
#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
address Assembler::locate_next_instruction(address inst) {
  return locate_operand(inst, end_pc_operand);
}
#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;
  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}
void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}
void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
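// Multi-byte NOPs built from the recommended 0F 1F /0 encoding, padded with
// ModRM/SIB and displacement bytes to a total of 4, 5, 7 or 8 bytes. They
// are combined by nop(int) below to fill alignment gaps with as few
// instructions as possible.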
void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}
void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}
void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDE);
  emit_int8(0xC0 | encode);
}
void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}
void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}
void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDC);
  emit_int8(0xC0 | encode);
}
void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}
void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andnl(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}
void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsil(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}
void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsmskl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}
void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsrl(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
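// Near relative call, opcode E8 rel32; the displacement is measured from the
// end of the 5-byte instruction. For a bound label the (necessarily
// non-positive) offset is emitted immediately; for an unbound label a zero
// displacement is emitted and the location is recorded with add_patch_at()
// so it can be fixed up when the label is bound.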
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}
void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}
void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
void Assembler::cmpl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}
void Assembler::comisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}
void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}
void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
}
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
}
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}
void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::decl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}
void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}
void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}
void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}
void Assembler::incl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}
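// Conditional jumps: short form 70+cc disp8 (2 bytes) or near form
// 0F 80+cc disp32 (6 bytes). The short form is used only when the label is
// already bound, the caller allows it (maybe_short) and the displacement
// fits in 8 bits; unbound labels always get the 6-byte form so the patching
// code has room for a 32-bit displacement.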
void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");
    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    emit_int8(0x70 | cc);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8(0x70 | cc);
    emit_int8(0);
  }
}
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}
void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8((unsigned char)0xEB);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xE0 | encode));
}
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
void Assembler::jmpb(Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) :delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = entry - pc();
    emit_int8((unsigned char)0xEB);
    emit_int8((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xEB);
    emit_int8(0);
  }
}
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(2), src);
}
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}
void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}
void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
}
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}
void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_operand(dst, src);
}
void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_operand(src, dst);
}
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}
void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  emit_operand(dst, src);
}
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x7E);
  emit_operand(dst, src);
}
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
}
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
}
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x11);
  emit_operand(src, dst);
}
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
}
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
}
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_int8(0x11);
  emit_operand(src, dst);
}
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8(0xC0 | encode);
}
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}
void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8(0xC0 | encode);
}
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
}
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
}
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}
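// Emit i bytes of padding. In ASSERT builds plain 0x90 NOPs are used so the
// padding disassembles trivially; otherwise the multi-byte address NOPs
// above are combined with 0x66 prefixes, following the Intel or AMD
// preference selected via VM_Version, to cover the gap with as few
// instructions as possible.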
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
  return;
#endif // ASSERT
  if (UseAddressNop && VM_Version::is_intel()) {
    while(i >= 15) {
      i -= 15;
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      addr_nop_8();
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8(0x66);   // size prefix
      emit_int8((unsigned char)0x90);
    }
    switch (i) {
      case 14:
        emit_int8(0x66); // size prefix
      case 13:
        emit_int8(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8(0x66); // size prefix
        emit_int8((unsigned char)0x90);
        break;
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    while(i >= 22) {
      i -= 11;
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      emit_int8(0x66); // size prefix
      addr_nop_8();
    }
    switch (i) {
      case 21:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_int8(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_int8(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }
    switch (i) {
      case 11:
        emit_int8(0x66); // size prefix
      case 10:
        emit_int8(0x66); // size prefix
      case 9:
        emit_int8(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_int8(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        emit_int8(0x66); // size prefix
      case 2:
        emit_int8(0x66); // size prefix
      case 1:
        emit_int8((unsigned char)0x90);
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  while(i > 12) {
    i -= 4;
    emit_int8(0x66); // size prefix
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_int8(0x66);
    }
    i -= 3;
    emit_int8(0x66);
    emit_int8(0x66);
    emit_int8((unsigned char)0x90);
  }
  switch (i) {
    case 4:
      emit_int8(0x66);
    case 3:
      emit_int8(0x66);
    case 2:
      emit_int8(0x66);
    case 1:
      emit_int8((unsigned char)0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}
void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}
void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
}
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x67, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
  assert(VM_Version::supports_avx2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
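// PAUSE (0xF3 0x90): spin-wait hint for busy-wait loops.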
void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}
void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, false);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_operand(dst, src);
}
void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}
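// POPCNT is encoded F3 0F B8 /r; the mandatory 0xF3 prefix must precede
// any REX prefix, so it is emitted before prefix()/prefix_and_encode().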
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}
void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}
#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
#endif
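// Prefetch hints: 0F 18 /0../3 select PREFETCHNTA/T0/T1/T2, and the
// 3DNow! forms use 0F 0D /0 (PREFETCH) and /1 (PREFETCHW). The Register
// passed to emit_operand() below only supplies the /digit (reg field).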
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}
void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}
void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}
void Assembler::prefix(Prefix p) {
  emit_int8(p);
}
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x00);
  emit_operand(dst, src);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
  emit_int8(mode & 0xFF);
}
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
  emit_int8(mode & 0xFF);
}
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F2);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
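// PSRLDQ/PSLLDQ shift a whole XMM register by bytes: 66 0F 73 with
// /3 resp. /7, so xmm3/xmm7 are passed purely to encode the /digit.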
void Assembler::psrldq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
void Assembler::pslldq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_operand(dst, src);
}
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x17);
  emit_operand(dst, src);
}
void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
}
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
}
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
}
void Assembler::push(int32_t imm32) {
  emit_int8(0x68);
  emit_int32(imm32);
}
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8(0x50 | encode);
}
void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}
#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
#endif
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}
void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}
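// REP string operations: 0xF3 is the REP prefix; with REX.W these are the
// quadword forms (MOVSQ/STOSQ), otherwise the doubleword/byte forms. They
// use rcx as the count and rsi/rdi as the source/destination pointers.
// (REPNE SCAS below uses the 0xF2 prefix instead.)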
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3);
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));       // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2);
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}
#ifdef _LP64
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xAF);
}
#endif
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}
void Assembler::sahf() {
#ifdef _LP64
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
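// SETcc is 0F 90+cc with an r/m8 operand; prefix_and_encode(..., true)
// requests a REX prefix when needed so the low byte of the register is
// addressable in 64-bit mode.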
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)(0x90 | cc));
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
}
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}
void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}
void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(3), dst);
}
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
}
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
}
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}
void Assembler::testl(Register dst, int32_t imm32) {
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
void Assembler::testl(Register dst, Address  src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}
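// TZCNT is encoded F3 0F BC /r (BSF with a mandatory F3 prefix); as with
// POPCNT, the F3 is emitted before the REX prefix.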
void Assembler::tzcntl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
}
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
}
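// RTM (restricted transactional memory) instructions:
//   XABORT imm8 = C6 F8 ib,  XBEGIN rel32 = C7 F8 cd,  XEND = 0F 01 D5.
// XBEGIN's displacement is relative to the end of the 6-byte instruction,
// hence the "offset - 6" below for bound labels.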
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}
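// XGETBV (0F 01 D0) reads the extended control register selected by ECX
// (XCR0 for ECX = 0) into EDX:EAX; used to check OS support for AVX state.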
void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
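// AVX scalar arithmetic: the VEX-encoded three-operand forms (0x58 add,
// 0x59 mul, 0x5C sub, 0x5E div, with F2 = sd and F3 = ss) take an explicit
// first source 'nds', so the destination does not have to be preserved as
// with the two-operand SSE forms.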
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
}
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
}
void Assembler::addpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
}
void Assembler::addps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
}
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::subpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
}
void Assembler::subps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
}
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
}
void Assembler::mulps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
}
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::divpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
}
void Assembler::divps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
}
void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}
void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}
void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
}
void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
}
void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}
void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
}
void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
}
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx(), "");
  emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
}
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
}
void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
}
void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
}
void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
}
void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
}
void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
}
void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
}
void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
}
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
}
void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  InstructionMark im(this);
  int dst_enc = dst->encoding();
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
  emit_int8(0x40);
  emit_operand(dst, src);
}
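// Immediate-count vector shifts share opcodes 0F 71/72/73 (word/dword/
// qword) and are distinguished by the /digit in the reg field: /2 logical
// right, /4 arithmetic right, /6 left. The xmm2/xmm4/xmm6 arguments below
// exist only to supply that /digit.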
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
}
void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
}
void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
}
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psrlq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
}
void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
}
void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
}
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
}
void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
}
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
  emit_int8(shift & 0xFF);
}
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
}
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
}
void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
}
void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
}
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
  assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
  emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
}
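// 128-bit lane insert/extract: VINSERTF128 / VEXTRACTF128 are 0F 3A 18/19,
// and the AVX2 integer forms VINSERTI128 / VEXTRACTI128 are 0F 3A 38/39;
// the trailing imm8 0x01 selects the upper 128-bit half of the YMM register.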
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(0x01);
}
void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x18);
  emit_operand(dst, src);
  emit_int8(0x01);
}
void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x19);
  emit_operand(src, dst);
  emit_int8(0x01);
}
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(0x01);
}
void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(dst != xnoreg, "sanity");
  int dst_enc = dst->encoding();
  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x38);
  emit_operand(dst, src);
  emit_int8(0x01);
}
void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionMark im(this);
  bool vector256 = true;
  assert(src != xnoreg, "sanity");
  int src_enc = src->encoding();
  vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
  emit_int8(0x39);
  emit_operand(src, dst);
  emit_int8(0x01);
}
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
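// PCLMULQDQ (66 0F 3A 44 /r ib) performs a carry-less multiply; the imm8
// mask selects which 64-bit half of each source participates.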
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  bool vector256 = false;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
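// VZEROUPPER (VEX.128 0F 77) zeroes bits 255:128 of all YMM registers,
// avoiding AVX-to-SSE transition penalties around legacy SSE code.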
void Assembler::vzeroupper() {
  assert(VM_Version::supports_avx(), "");
  (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
  emit_int8(0x77);
}
#ifndef _LP64
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}
void Assembler::decl(Register dst) {
  emit_int8(0x48 | dst->encoding());
}
#endif // _LP64
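// x87 floating-point instructions. Register forms are two bytes,
// opcode + (base | stack index), emitted via emit_farith(); memory forms
// use the D8..DF opcodes with the /digit passed as a Register to
// emit_operand32().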
void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}
void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}
void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}
void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}
void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}
void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}
void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}
void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}
void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}
void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}
void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}
void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}
void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src);
}
void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}
void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}
void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}
void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}
void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}
void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}
void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}
void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}
void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}
void Assembler::fucomi(int i) {
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}
void Assembler::fucomip(int i) {
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}
void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}
void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}
void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}
void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}
void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}
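// Lookup tables used when emitting legacy (non-VEX) SSE encodings:
// simd_pre maps VexSimdPrefix (none/66/F3/F2) to the mandatory prefix byte, and
// simd_opc maps VexOpcode to the second escape byte that follows 0x0F
// (0 for the plain 0F map, 0x38 and 0x3A for the three-byte maps).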
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
static int simd_opc[4] = { 0,    0, 0x38, 0x3A };
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}
int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
                          prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}
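// Emit an AVX VEX prefix. The three-byte form (C4) is required whenever the B or
// X extension bits, VEX.W, or the 0F 38 / 0F 3A opcode maps are needed; otherwise
// the shorter two-byte form (C5) is used. The R/X/B bits and the vvvv register
// field are stored inverted, as the VEX format requires.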
void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);
    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= (vector256 ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
  bool vex_r = (xreg_enc >= 8);
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
}
int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
  bool vex_r = (dst_enc >= 8);
  bool vex_b = (src_enc >= 8);
  bool vex_x = false;
  vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int  nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, rex_w);
  }
}
int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
  }
}
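// Helpers shared by most SSE/AVX arithmetic instructions: they pick a VEX
// encoding when UseAVX > 0 and fall back to the legacy prefix + 0F encoding
// otherwise. The "_nonds" variants pass xnoreg because the instruction has no
// separate non-destructive source operand.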
void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, dst, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}
void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, dst, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
  InstructionMark im(this);
  simd_prefix(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_operand(dst, src);
}
void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
  int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               Address src, VexSimdPrefix pre, bool vector256) {
  InstructionMark im(this);
  vex_prefix(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_operand(dst, src);
}
void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                               XMMRegister src, VexSimdPrefix pre, bool vector256) {
  int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
  emit_int8(opcode);
  emit_int8((unsigned char)(0xC0 | encode));
}
#ifndef _LP64
void Assembler::incl(Register dst) {
  emit_int8(0x40 | dst->encoding());
}
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}
void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}
void Assembler::popa() { // 32bit
  emit_int8(0x61);
}
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}
void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}
void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}
void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
}
#else // LP64
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}
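// Returns true if the target of this AddressLiteral can always be reached with a
// signed 32-bit (rip-relative) displacement from anywhere in the code cache. The
// +/- fudge appears to allow for the size of the instruction being emitted, since
// the displacement is measured from the end of that instruction.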
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }
  if (ForceUnreachable) {
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}
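// REX prefix computation. Register encodings >= 8 (r8-r15, xmm8-xmm15) need the
// REX.B/X/R extension bits; byte instructions on encodings 4-7 need a plain REX
// prefix so that spl/bpl/sil/dil are selected instead of ah/ch/dh/bh.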
int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}
int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}
int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}
void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}
void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}
void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
void Assembler::prefixq(Address adr, Register src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}
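// --- 64-bit (q-suffixed) integer instructions ---
// These reuse the 32-bit opcodes but first emit a REX.W-carrying prefix via
// prefixq()/prefixq_and_encode(). For example, addq(rax, rbx) produces
// 0x48 0x03 0xC3 (REX.W, ADD r64,r/m64, ModRM for rax/rbx).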
void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}
void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}
void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
void Assembler::adcxq(Register dst, Register src) {
  emit_int8((unsigned char)0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::adoxq(Register dst, Register src) {
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x38);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}
void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}
void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::andnq(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}
void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsiq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}
void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsmskq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}
void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::blsrq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}
void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x3B);
  emit_operand(src, dst);
}
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
void Assembler::cmpq(Register dst, Address  src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x3B);
  emit_operand(dst, src);
}
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
  emit_int8(0x2A);
  emit_operand(dst, src);
}
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
  emit_int8(0x2A);
  emit_operand(dst, src);
}
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::decl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::decq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC8 | encode));
}
void Assembler::decq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}
void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_operand(dst, src);
}
void Assembler::incl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::incq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::incq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data64(imm64, rspec);
}
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movdq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}
void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}
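// Deliberately disabled: ShouldNotReachHere() fires before the encoding below,
// which is left in place but unreachable.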
void Assembler::movslq(Register dst, int32_t imm32) {
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x63);
  emit_operand(dst, src);
}
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x63);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}
void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}
void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}
void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB7);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::mulq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}
void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}
void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, true, false);
  emit_int8((unsigned char)0xF6);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));
}
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);
}
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
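// Reloads the 15 general registers saved by pusha() below; slot 11 (the saved
// rsp) is intentionally skipped, and the whole 16-word block is released with a
// single addq.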
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));
  addq(rsp, 16 * wordSize);
}
void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefixq(src, dst);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}
void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);
}
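// Spills rax..r15 into a 16-word block carved out below rsp. The original rsp is
// stored first, at -5*wordSize, which corresponds to slot 11 once rsp has been
// lowered by 16 words; popa() above skips that slot when restoring.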
void Assembler::pusha() { // 64bit
  movq(Address(rsp, -5 * wordSize), rsp);
  subq(rsp, 16 * wordSize);
  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);
}
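// Rotate/shift-by-immediate forms: the count-of-1 case uses the shorter 0xD1
// encoding, everything else 0xC1 with an immediate byte. The asserts pass
// imm8 >> 1 so that 64-bit counts up to 63 get through what is presumably a
// 0..31 range check in isShiftCount().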
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD0 | encode));
    emit_int8(imm8);
  }
}
void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xD8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xD8 | encode));
    emit_int8(imm8);
  }
}
void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xC8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xC8 | encode));
    emit_int8(imm8);
  }
}
void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, true, false);
  emit_int8((unsigned char)0xF0);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode));
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));
  emit_int8(imm8);
}
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}
void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
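// TEST against rax (encoding 0) uses the one-byte-shorter A9 id form, which has
// no ModRM byte; other registers use F7 /0 id.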
void Assembler::testq(Register dst, int32_t imm32) {
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_int8((unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}
#endif // !LP64
C:\hotspot-69087d08d473\src\cpu\x86\vm/assembler_x86.hpp
#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP
#include "asm/register.hpp"
class BiasedLockingCounters;
class Argument VALUE_OBJ_CLASS_SPEC {
 public:
  enum {
#ifdef _LP64
#ifdef _WIN64
    n_int_register_parameters_c   = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 4,  // xmm0 - xmm3 (c_farg0, c_farg1, ... )
#else
    n_int_register_parameters_c   = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // xmm0 - xmm7 (c_farg0, c_farg1, ... )
#endif // _WIN64
    n_int_register_parameters_j   = 6, // j_rarg0, j_rarg1, ...
    n_float_register_parameters_j = 8  // j_farg0, j_farg1, ...
#else
    n_register_parameters = 0   // 0 registers used to pass arguments
#endif // _LP64
  };
};
#ifdef _LP64
#ifdef _WIN64
REGISTER_DECLARATION(Register, c_rarg0, rcx);
REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);
REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
#else
REGISTER_DECLARATION(Register, c_rarg0, rdi);
REGISTER_DECLARATION(Register, c_rarg1, rsi);
REGISTER_DECLARATION(Register, c_rarg2, rdx);
REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);
REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
#endif // _WIN64
REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
#ifdef _WIN64
REGISTER_DECLARATION(Register, j_rarg3, rdi);
REGISTER_DECLARATION(Register, j_rarg4, rsi);
#else
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
REGISTER_DECLARATION(Register, rscratch1, r10);  // volatile
REGISTER_DECLARATION(Register, rscratch2, r11);  // volatile
REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
#else
#define rscratch1 noreg
#define rscratch2 noreg
#endif // _LP64
REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
class ArrayAddress;
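// Address describes an x86 memory operand of the form
//   [base + index*scale + disp32]
// and optionally carries a RelocationHolder for the displacement.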
class Address VALUE_OBJ_CLASS_SPEC {
 public:
  enum ScaleFactor {
    no_scale = -1,
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3,
    times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
  };
  static ScaleFactor times(int size) {
    assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
    if (size == 8)  return times_8;
    if (size == 4)  return times_4;
    if (size == 2)  return times_2;
    return times_1;
  }
  static int scale_size(ScaleFactor scale) {
    assert(scale != no_scale, "");
    assert(((1 << (int)times_1) == 1 &&
            (1 << (int)times_2) == 2 &&
            (1 << (int)times_4) == 4 &&
            (1 << (int)times_8) == 8), "");
    return (1 << (int)scale);
  }
 private:
  Register         _base;
  Register         _index;
  ScaleFactor      _scale;
  int              _disp;
  RelocationHolder _rspec;
  NOT_LP64(Address(address loc, RelocationHolder spec);)
  Address(int disp, address loc, relocInfo::relocType rtype);
  Address(int disp, address loc, RelocationHolder spec);
 public:
  int disp() { return _disp; }
  Address()
    : _base(noreg),
      _index(noreg),
      _scale(no_scale),
      _disp(0) {
  }
  Address(Register base, int disp)
    : _base(base),
      _index(noreg),
      _scale(no_scale),
      _disp(disp) {
  }
  Address(Register base, Register index, ScaleFactor scale, int disp = 0)
    : _base (base),
      _index(index),
      _scale(scale),
      _disp (disp) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
  Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
    : _base (base),
      _index(index.register_or_noreg()),
      _scale(scale),
      _disp (disp + (index.constant_or_zero() * scale_size(scale))) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
  Address plus_disp(int disp) const {
    Address a = (*this);
    a._disp += disp;
    return a;
  }
  Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
    Address a = (*this);
    a._disp += disp.constant_or_zero() * scale_size(scale);
    if (disp.is_register()) {
      assert(!a.index()->is_valid(), "competing indexes");
      a._index = disp.as_register();
      a._scale = scale;
    }
    return a;
  }
  bool is_same_address(Address a) const {
    return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
  }
#ifdef ASSERT
  Address(Register base, ByteSize disp)
    : _base(base),
      _index(noreg),
      _scale(no_scale),
      _disp(in_bytes(disp)) {
  }
  Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
    : _base(base),
      _index(index),
      _scale(scale),
      _disp(in_bytes(disp)) {
    assert(!index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
  Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
    : _base (base),
      _index(index.register_or_noreg()),
      _scale(scale),
      _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) {
    if (!index.is_register())  scale = Address::no_scale;
    assert(!_index->is_valid() == (scale == Address::no_scale),
           "inconsistent address");
  }
#endif // ASSERT
  bool        uses(Register reg) const { return _base == reg || _index == reg; }
  Register    base()             const { return _base;  }
  Register    index()            const { return _index; }
  ScaleFactor scale()            const { return _scale; }
  int         disp()             const { return _disp;  }
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
  static Address make_array(ArrayAddress);
 private:
  bool base_needs_rex() const {
    return _base != noreg && _base->encoding() >= 8;
  }
  bool index_needs_rex() const {
    return _index != noreg &&_index->encoding() >= 8;
  }
  relocInfo::relocType reloc() const { return _rspec.type(); }
  friend class Assembler;
  friend class MacroAssembler;
  friend class LIR_Assembler; // base/index/scale/disp
};
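// AddressLiteral wraps an absolute target address together with its relocation
// spec; addr() returns a copy flagged as an l-value, i.e. one where the address
// itself (rather than the memory it names) is intended as the operand.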
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
  friend class ArrayAddress;
  RelocationHolder _rspec;
  bool _is_lval;
  address          _target;
 protected:
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}
  public:
  AddressLiteral(address target, relocInfo::relocType rtype);
  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}
  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }
 private:
  address target() { return _target; }
  bool is_lval() { return _is_lval; }
  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }
  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
};
class RuntimeAddress: public AddressLiteral {
  public:
  RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
};
class ExternalAddress: public AddressLiteral {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }
 public:
  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
};
class InternalAddress: public AddressLiteral {
  public:
  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
};
class ArrayAddress VALUE_OBJ_CLASS_SPEC {
  private:
  AddressLiteral _base;
  Address        _index;
  public:
  ArrayAddress() {};
  ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
  AddressLiteral base() { return _base; }
  Address index() { return _index; }
};
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize);
class Assembler : public AbstractAssembler  {
  friend class AbstractAssembler; // for the non-virtual hack
  friend class LIR_Assembler; // as_Address()
  friend class StubGenerator;
 public:
  enum Condition {                     // The x86 condition codes used for conditional jumps/moves.
    zero          = 0x4,
    notZero       = 0x5,
    equal         = 0x4,
    notEqual      = 0x5,
    less          = 0xc,
    lessEqual     = 0xe,
    greater       = 0xf,
    greaterEqual  = 0xd,
    below         = 0x2,
    belowEqual    = 0x6,
    above         = 0x7,
    aboveEqual    = 0x3,
    overflow      = 0x0,
    noOverflow    = 0x1,
    carrySet      = 0x2,
    carryClear    = 0x3,
    negative      = 0x8,
    positive      = 0x9,
    parity        = 0xa,
    noParity      = 0xb
  };
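  // Prefix bytes. The REX values follow the 0100WRXB layout (bit 3 = W: 64-bit
  // operand size; bits 2..0 = R, X, B extension bits), so e.g. REX_WB = 0x49
  // sets W and B. VEX_3bytes/VEX_2bytes are the C4/C5 AVX escape bytes.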
  enum Prefix {
    CS_segment = 0x2e,
    SS_segment = 0x36,
    DS_segment = 0x3e,
    ES_segment = 0x26,
    FS_segment = 0x64,
    GS_segment = 0x65,
    REX        = 0x40,
    REX_B      = 0x41,
    REX_X      = 0x42,
    REX_XB     = 0x43,
    REX_R      = 0x44,
    REX_RB     = 0x45,
    REX_RX     = 0x46,
    REX_RXB    = 0x47,
    REX_W      = 0x48,
    REX_WB     = 0x49,
    REX_WX     = 0x4A,
    REX_WXB    = 0x4B,
    REX_WR     = 0x4C,
    REX_WRB    = 0x4D,
    REX_WRX    = 0x4E,
    REX_WRXB   = 0x4F,
    VEX_3bytes = 0xC4,
    VEX_2bytes = 0xC5
  };
  enum VexPrefix {
    VEX_B = 0x20,
    VEX_X = 0x40,
    VEX_R = 0x80,
    VEX_W = 0x80
  };
  enum VexSimdPrefix {
    VEX_SIMD_NONE = 0x0,
    VEX_SIMD_66   = 0x1,
    VEX_SIMD_F3   = 0x2,
    VEX_SIMD_F2   = 0x3
  };
  enum VexOpcode {
    VEX_OPCODE_NONE  = 0x0,
    VEX_OPCODE_0F    = 0x1,
    VEX_OPCODE_0F_38 = 0x2,
    VEX_OPCODE_0F_3A = 0x3
  };
  enum WhichOperand {
    imm_operand  = 0,            // embedded 32-bit|64-bit immediate operand
    disp32_operand = 1,          // embedded 32-bit displacement or address
    call32_operand = 2,          // embedded 32-bit self-relative displacement
#ifndef _LP64
    _WhichOperand_limit = 3
#else
     narrow_oop_operand = 3,     // embedded 32-bit immediate narrow oop
    _WhichOperand_limit = 4
#endif
  };
private:
  int prefix_and_encode(int reg_enc, bool byteinst = false);
  int prefixq_and_encode(int reg_enc);
  int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
  int prefixq_and_encode(int dst_enc, int src_enc);
  void prefix(Register reg);
  void prefix(Address adr);
  void prefixq(Address adr);
  void prefix(Address adr, Register reg,  bool byteinst = false);
  void prefix(Address adr, XMMRegister reg);
  void prefixq(Address adr, Register reg);
  void prefixq(Address adr, XMMRegister reg);
  void prefetch_prefix(Address src);
  void rex_prefix(Address adr, XMMRegister xreg,
                  VexSimdPrefix pre, VexOpcode opc, bool rex_w);
  int  rex_prefix_and_encode(int dst_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc, bool rex_w);
  void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
                  int nds_enc, VexSimdPrefix pre, VexOpcode opc,
                  bool vector256);
  void vex_prefix(Address adr, int nds_enc, int xreg_enc,
                  VexSimdPrefix pre, VexOpcode opc,
                  bool vex_w, bool vector256);
  void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
                  VexSimdPrefix pre, bool vector256 = false) {
    int dst_enc = dst->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
  }
  void vex_prefix_0F38(Register dst, Register nds, Address src) {
    bool vex_w = false;
    bool vector256 = false;
    vex_prefix(src, nds->encoding(), dst->encoding(),
               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }
  void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
    bool vex_w = true;
    bool vector256 = false;
    vex_prefix(src, nds->encoding(), dst->encoding(),
               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }
  int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                             VexSimdPrefix pre, VexOpcode opc,
                             bool vex_w, bool vector256);
  int  vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
    bool vex_w = false;
    bool vector256 = false;
    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }
  int  vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
    bool vex_w = true;
    bool vector256 = false;
    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
  }
  int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
                             VexSimdPrefix pre, bool vector256 = false,
                             VexOpcode opc = VEX_OPCODE_0F) {
    int src_enc = src->encoding();
    int dst_enc = dst->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
  }
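  // In these helpers vector256 selects VEX.L (the 256-bit AVX forms) and vex_w sets
  // VEX.W for 64-bit operand variants; nds is the extra non-destructive source
  // register carried in the VEX.vvvv field, with an invalid/xnoreg nds passed down
  // as encoding 0 (unused).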
  void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
                   VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
                   bool rex_w = false, bool vector256 = false);
  void simd_prefix(XMMRegister dst, Address src,
                   VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
    simd_prefix(dst, xnoreg, src, pre, opc);
  }
  void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) {
    simd_prefix(src, dst, pre);
  }
  void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
                     VexSimdPrefix pre) {
    bool rex_w = true;
    simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w);
  }
  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
                             VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
                             bool rex_w = false, bool vector256 = false);
  int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
                             VexSimdPrefix pre) {
    return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre);
  }
  int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) {
    return simd_prefix_and_encode(dst, xnoreg, src, pre);
  }
  int simd_prefix_and_encode(Register dst, XMMRegister src,
                             VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
    return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc);
  }
  int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
                               VexSimdPrefix pre) {
    bool rex_w = true;
    return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
  }
  int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) {
    return simd_prefix_and_encode_q(dst, xnoreg, src, pre);
  }
  int simd_prefix_and_encode_q(Register dst, XMMRegister src,
                             VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) {
    bool rex_w = true;
    return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w);
  }
  void emit_arith_b(int op1, int op2, Register dst, int imm8);
  void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
  void emit_arith(int op1, int op2, Register dst, Register src);
  void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
  void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
  void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre);
  void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre);
  void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                      Address src, VexSimdPrefix pre, bool vector256);
  void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
                      XMMRegister src, VexSimdPrefix pre, bool vector256);
  void emit_operand(Register reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec,
                    int rip_relative_correction = 0);
  void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
  void emit_operand32(Register reg, Address adr);
  void emit_operand(XMMRegister reg,
                    Register base, Register index, Address::ScaleFactor scale,
                    int disp,
                    RelocationHolder const& rspec);
  void emit_operand(XMMRegister reg, Address adr);
  void emit_operand(MMXRegister reg, Address adr);
  void emit_operand(Address adr, MMXRegister reg);
  void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
  void emit_farith(int b1, int b2, int i);
 protected:
  #ifdef ASSERT
  void check_relocation(RelocationHolder const& rspec, int format);
  #endif
  void emit_data(jint data, relocInfo::relocType    rtype, int format);
  void emit_data(jint data, RelocationHolder const& rspec, int format);
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
  bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
#ifndef _LP64
  void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);   // 32BIT ONLY
  void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);    // 32BIT ONLY
  void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);     // 32BIT ONLY
  void push_literal32(int32_t imm32, RelocationHolder const& rspec);                 // 32BIT ONLY
#else
  void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);   // 64BIT ONLY
  void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
  void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
  void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
#endif // _LP64
  void call_literal(address entry, RelocationHolder const& rspec);
  void jmp_literal(address entry, RelocationHolder const& rspec);
  void decl(Register dst);
  void decl(Address dst);
  void decq(Register dst);
  void decq(Address dst);
  void incl(Register dst);
  void incl(Address dst);
  void incq(Register dst);
  void incq(Address dst);
  void movss(XMMRegister dst, Address src);
  void movss(XMMRegister dst, XMMRegister src);
  void movss(Address dst, XMMRegister src);
  void movsd(XMMRegister dst, Address src);
  void movsd(XMMRegister dst, XMMRegister src);
  void movsd(Address dst, XMMRegister src);
  void movlpd(XMMRegister dst, Address src);
  void movaps(XMMRegister dst, XMMRegister src);
  void movapd(XMMRegister dst, XMMRegister src);
  void prefix(Prefix p);
  public:
  Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
  static address locate_operand(address inst, WhichOperand which);
  static address locate_next_instruction(address inst);
  static bool is_polling_page_far() NOT_LP64({ return false;});
  void lea(Register dst, Address src);
  void mov(Register dst, Register src);
  void pusha();
  void popa();
  void pushf();
  void popf();
  void push(int32_t imm32);
  void push(Register src);
  void pop(Register dst);
  void push(void* v);
  void pop(void* v);
  void rep_mov();
  void rep_stos();
  void rep_stosb();
  void repne_scan();
#ifdef _LP64
  void repne_scanl();
#endif
  void adcl(Address dst, int32_t imm32);
  void adcl(Address dst, Register src);
  void adcl(Register dst, int32_t imm32);
  void adcl(Register dst, Address src);
  void adcl(Register dst, Register src);
  void adcq(Register dst, int32_t imm32);
  void adcq(Register dst, Address src);
  void adcq(Register dst, Register src);
  void addl(Address dst, int32_t imm32);
  void addl(Address dst, Register src);
  void addl(Register dst, int32_t imm32);
  void addl(Register dst, Address src);
  void addl(Register dst, Register src);
  void addq(Address dst, int32_t imm32);
  void addq(Address dst, Register src);
  void addq(Register dst, int32_t imm32);
  void addq(Register dst, Address src);
  void addq(Register dst, Register src);
#ifdef _LP64
  void adcxq(Register dst, Register src);
  void adoxq(Register dst, Register src);
#endif
  void addr_nop_4();
  void addr_nop_5();
  void addr_nop_7();
  void addr_nop_8();
  void addsd(XMMRegister dst, Address src);
  void addsd(XMMRegister dst, XMMRegister src);
  void addss(XMMRegister dst, Address src);
  void addss(XMMRegister dst, XMMRegister src);
  void aesdec(XMMRegister dst, Address src);
  void aesdec(XMMRegister dst, XMMRegister src);
  void aesdeclast(XMMRegister dst, Address src);
  void aesdeclast(XMMRegister dst, XMMRegister src);
  void aesenc(XMMRegister dst, Address src);
  void aesenc(XMMRegister dst, XMMRegister src);
  void aesenclast(XMMRegister dst, Address src);
  void aesenclast(XMMRegister dst, XMMRegister src);
  void andl(Address  dst, int32_t imm32);
  void andl(Register dst, int32_t imm32);
  void andl(Register dst, Address src);
  void andl(Register dst, Register src);
  void andq(Address  dst, int32_t imm32);
  void andq(Register dst, int32_t imm32);
  void andq(Register dst, Address src);
  void andq(Register dst, Register src);
  void andnl(Register dst, Register src1, Register src2);
  void andnl(Register dst, Register src1, Address src2);
  void andnq(Register dst, Register src1, Register src2);
  void andnq(Register dst, Register src1, Address src2);
  void blsil(Register dst, Register src);
  void blsil(Register dst, Address src);
  void blsiq(Register dst, Register src);
  void blsiq(Register dst, Address src);
  void blsmskl(Register dst, Register src);
  void blsmskl(Register dst, Address src);
  void blsmskq(Register dst, Register src);
  void blsmskq(Register dst, Address src);
  void blsrl(Register dst, Register src);
  void blsrl(Register dst, Address src);
  void blsrq(Register dst, Register src);
  void blsrq(Register dst, Address src);
  void bsfl(Register dst, Register src);
  void bsrl(Register dst, Register src);
#ifdef _LP64
  void bsfq(Register dst, Register src);
  void bsrq(Register dst, Register src);
#endif
  void bswapl(Register reg);
  void bswapq(Register reg);
  void call(Label& L, relocInfo::relocType rtype);
  void call(Register reg);  // push pc; pc <- reg
  void call(Address adr);   // push pc; pc <- adr
  void cdql();
  void cdqq();
  void cld();
  void clflush(Address adr);
  void cmovl(Condition cc, Register dst, Register src);
  void cmovl(Condition cc, Register dst, Address src);
  void cmovq(Condition cc, Register dst, Register src);
  void cmovq(Condition cc, Register dst, Address src);
  void cmpb(Address dst, int imm8);
  void cmpl(Address dst, int32_t imm32);
  void cmpl(Register dst, int32_t imm32);
  void cmpl(Register dst, Register src);
  void cmpl(Register dst, Address src);
  void cmpq(Address dst, int32_t imm32);
  void cmpq(Address dst, Register src);
  void cmpq(Register dst, int32_t imm32);
  void cmpq(Register dst, Register src);
  void cmpq(Register dst, Address src);
  void cmpl(Register dst, void* junk); // dummy
  void cmpq(Register dst, void* junk); // dummy
  void cmpw(Address dst, int imm16);
  void cmpxchg8 (Address adr);
  void cmpxchgl(Register reg, Address adr);
  void cmpxchgq(Register reg, Address adr);
  void comisd(XMMRegister dst, Address src);
  void comisd(XMMRegister dst, XMMRegister src);
  void comiss(XMMRegister dst, Address src);
  void comiss(XMMRegister dst, XMMRegister src);
  void cpuid();
  void cvtsd2ss(XMMRegister dst, XMMRegister src);
  void cvtsd2ss(XMMRegister dst, Address src);
  void cvtsi2sdl(XMMRegister dst, Register src);
  void cvtsi2sdl(XMMRegister dst, Address src);
  void cvtsi2sdq(XMMRegister dst, Register src);
  void cvtsi2sdq(XMMRegister dst, Address src);
  void cvtsi2ssl(XMMRegister dst, Register src);
  void cvtsi2ssl(XMMRegister dst, Address src);
  void cvtsi2ssq(XMMRegister dst, Register src);
  void cvtsi2ssq(XMMRegister dst, Address src);
  void cvtdq2pd(XMMRegister dst, XMMRegister src);
  void cvtdq2ps(XMMRegister dst, XMMRegister src);
  void cvtss2sd(XMMRegister dst, XMMRegister src);
  void cvtss2sd(XMMRegister dst, Address src);
  void cvttsd2sil(Register dst, Address src);
  void cvttsd2sil(Register dst, XMMRegister src);
  void cvttsd2siq(Register dst, XMMRegister src);
  void cvttss2sil(Register dst, XMMRegister src);
  void cvttss2siq(Register dst, XMMRegister src);
  void divsd(XMMRegister dst, Address src);
  void divsd(XMMRegister dst, XMMRegister src);
  void divss(XMMRegister dst, Address src);
  void divss(XMMRegister dst, XMMRegister src);
  void emms();
  void fabs();
  void fadd(int i);
  void fadd_d(Address src);
  void fadd_s(Address src);
  void fadda(int i); // "alternate" fadd
  void faddp(int i = 1);
  void fchs();
  void fcom(int i);
  void fcomp(int i = 1);
  void fcomp_d(Address src);
  void fcomp_s(Address src);
  void fcompp();
  void fcos();
  void fdecstp();
  void fdiv(int i);
  void fdiv_d(Address src);
  void fdivr_s(Address src);
  void fdiva(int i);  // "alternate" fdiv
  void fdivp(int i = 1);
  void fdivr(int i);
  void fdivr_d(Address src);
  void fdiv_s(Address src);
  void fdivra(int i); // "alternate" reversed fdiv
  void fdivrp(int i = 1);
  void ffree(int i = 0);
  void fild_d(Address adr);
  void fild_s(Address adr);
  void fincstp();
  void finit();
  void fist_s (Address adr);
  void fistp_d(Address adr);
  void fistp_s(Address adr);
  void fld1();
  void fld_d(Address adr);
  void fld_s(Address adr);
  void fld_s(int index);
  void fld_x(Address adr);  // extended-precision (80-bit) format
  void fldcw(Address src);
  void fldenv(Address src);
  void fldlg2();
  void fldln2();
  void fldz();
  void flog();
  void flog10();
  void fmul(int i);
  void fmul_d(Address src);
  void fmul_s(Address src);
  void fmula(int i);  // "alternate" fmul
  void fmulp(int i = 1);
  void fnsave(Address dst);
  void fnstcw(Address src);
  void fnstsw_ax();
  void fprem();
  void fprem1();
  void frstor(Address src);
  void fsin();
  void fsqrt();
  void fst_d(Address adr);
  void fst_s(Address adr);
  void fstp_d(Address adr);
  void fstp_d(int index);
  void fstp_s(Address adr);
  void fstp_x(Address adr); // extended-precision (80-bit) format
  void fsub(int i);
  void fsub_d(Address src);
  void fsub_s(Address src);
  void fsuba(int i);  // "alternate" fsub
  void fsubp(int i = 1);
  void fsubr(int i);
  void fsubr_d(Address src);
  void fsubr_s(Address src);
  void fsubra(int i); // "alternate" reversed fsub
  void fsubrp(int i = 1);
  void ftan();
  void ftst();
  void fucomi(int i = 1);
  void fucomip(int i = 1);
  void fwait();
  void fxch(int i = 1);
  void fxrstor(Address src);
  void fxsave(Address dst);
  void fyl2x();
  void frndint();
  void f2xm1();
  void fldl2e();
  void hlt();
  void idivl(Register src);
  void divl(Register src); // Unsigned division
#ifdef _LP64
  void idivq(Register src);
#endif
  void imull(Register dst, Register src);
  void imull(Register dst, Register src, int value);
  void imull(Register dst, Address src);
#ifdef _LP64
  void imulq(Register dst, Register src);
  void imulq(Register dst, Register src, int value);
  void imulq(Register dst, Address src);
#endif
  void jcc(Condition cc, Label& L, bool maybe_short = true);
  void jccb(Condition cc, Label& L);
  void jmp(Address entry);    // pc <- entry
  void jmp(Label& L, bool maybe_short = true);   // unconditional jump to L
  void jmp(Register entry); // pc <- entry
  void jmpb(Label& L);
  void ldmxcsr( Address src );
  void leal(Register dst, Address src);
  void leaq(Register dst, Address src);
  void lfence();
  void lock();
  void lzcntl(Register dst, Register src);
#ifdef _LP64
  void lzcntq(Register dst, Register src);
#endif
  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };
  void membar(Membar_mask_bits order_constraint) {
    if (os::is_MP()) {
      if (order_constraint & StoreLoad) {
        lock();
        addl(Address(rsp, 0), 0);// Assert the lock# signal here
      }
    }
  }
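  // On x86's TSO memory model only StoreLoad needs an explicit fence; a locked add
  // of zero to the top of the stack acts as a full barrier and is generally cheaper
  // than mfence.  The other orderings are already guaranteed by the hardware, so
  // nothing is emitted for them (and nothing at all on a uniprocessor).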
  void mfence();
  void mov64(Register dst, int64_t imm64);
  void movb(Address dst, Register src);
  void movb(Address dst, int imm8);
  void movb(Register dst, Address src);
  void movdl(XMMRegister dst, Register src);
  void movdl(Register dst, XMMRegister src);
  void movdl(XMMRegister dst, Address src);
  void movdl(Address dst, XMMRegister src);
  void movdq(XMMRegister dst, Register src);
  void movdq(Register dst, XMMRegister src);
  void movdqa(XMMRegister dst, XMMRegister src);
  void movdqa(XMMRegister dst, Address src);
  void movdqu(Address     dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void movlhps(XMMRegister dst, XMMRegister src);
  void movl(Register dst, int32_t imm32);
  void movl(Address dst, int32_t imm32);
  void movl(Register dst, Register src);
  void movl(Register dst, Address src);
  void movl(Address dst, Register src);
  void movl(Address  dst, void* junk);
  void movl(Register dst, void* junk);
#ifdef _LP64
  void movq(Register dst, Register src);
  void movq(Register dst, Address src);
  void movq(Address  dst, Register src);
#endif
  void movq(Address     dst, MMXRegister src );
  void movq(MMXRegister dst, Address src );
#ifdef _LP64
  void movq(Address  dst, void* dummy);
  void movq(Register dst, void* dummy);
#endif
  void movq(Address     dst, XMMRegister src);
  void movq(XMMRegister dst, Address src);
  void movsbl(Register dst, Address src);
  void movsbl(Register dst, Register src);
#ifdef _LP64
  void movsbq(Register dst, Address src);
  void movsbq(Register dst, Register src);
  void movslq(Address  dst, int32_t imm64);
  void movslq(Register dst, int32_t imm64);
  void movslq(Register dst, Address src);
  void movslq(Register dst, Register src);
  void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
#endif
  void movswl(Register dst, Address src);
  void movswl(Register dst, Register src);
#ifdef _LP64
  void movswq(Register dst, Address src);
  void movswq(Register dst, Register src);
#endif
  void movw(Address dst, int imm16);
  void movw(Register dst, Address src);
  void movw(Address dst, Register src);
  void movzbl(Register dst, Address src);
  void movzbl(Register dst, Register src);
#ifdef _LP64
  void movzbq(Register dst, Address src);
  void movzbq(Register dst, Register src);
#endif
  void movzwl(Register dst, Address src);
  void movzwl(Register dst, Register src);
#ifdef _LP64
  void movzwq(Register dst, Address src);
  void movzwq(Register dst, Register src);
#endif
  void mull(Address src);
  void mull(Register src);
#ifdef _LP64
  void mulq(Address src);
  void mulq(Register src);
  void mulxq(Register dst1, Register dst2, Register src);
#endif
  void mulsd(XMMRegister dst, Address src);
  void mulsd(XMMRegister dst, XMMRegister src);
  void mulss(XMMRegister dst, Address src);
  void mulss(XMMRegister dst, XMMRegister src);
  void negl(Register dst);
#ifdef _LP64
  void negq(Register dst);
#endif
  void nop(int i = 1);
  void notl(Register dst);
#ifdef _LP64
  void notq(Register dst);
#endif
  void orl(Address dst, int32_t imm32);
  void orl(Register dst, int32_t imm32);
  void orl(Register dst, Address src);
  void orl(Register dst, Register src);
  void orl(Address dst, Register src);
  void orq(Address dst, int32_t imm32);
  void orq(Register dst, int32_t imm32);
  void orq(Register dst, Address src);
  void orq(Register dst, Register src);
  void packuswb(XMMRegister dst, XMMRegister src);
  void packuswb(XMMRegister dst, Address src);
  void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
  void pause();
  void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
  void pcmpestri(XMMRegister xmm1, Address src, int imm8);
  void pextrd(Register dst, XMMRegister src, int imm8);
  void pextrq(Register dst, XMMRegister src, int imm8);
  void pinsrd(XMMRegister dst, Register src, int imm8);
  void pinsrq(XMMRegister dst, Register src, int imm8);
  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);
#ifndef _LP64 // no 32bit push/pop on amd64
  void popl(Address dst);
#endif
#ifdef _LP64
  void popq(Address dst);
#endif
  void popcntl(Register dst, Address src);
  void popcntl(Register dst, Register src);
#ifdef _LP64
  void popcntq(Register dst, Address src);
  void popcntq(Register dst, Register src);
#endif
  void prefetchnta(Address src);
  void prefetchr(Address src);
  void prefetcht0(Address src);
  void prefetcht1(Address src);
  void prefetcht2(Address src);
  void prefetchw(Address src);
  void pshufb(XMMRegister dst, XMMRegister src);
  void pshufb(XMMRegister dst, Address src);
  void pshufd(XMMRegister dst, XMMRegister src, int mode);
  void pshufd(XMMRegister dst, Address src,     int mode);
  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src,     int mode);
  void psrldq(XMMRegister dst, int shift);
  void pslldq(XMMRegister dst, int shift);
  void ptest(XMMRegister dst, XMMRegister src);
  void ptest(XMMRegister dst, Address src);
  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, Address src);
  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src);
  void punpckldq(XMMRegister dst, XMMRegister src);
  void punpckldq(XMMRegister dst, Address src);
  void punpcklqdq(XMMRegister dst, XMMRegister src);
#ifndef _LP64 // no 32bit push/pop on amd64
  void pushl(Address src);
#endif
  void pushq(Address src);
  void rcll(Register dst, int imm8);
  void rclq(Register dst, int imm8);
  void rcrq(Register dst, int imm8);
  void rdtsc();
  void ret(int imm16);
#ifdef _LP64
  void rorq(Register dst, int imm8);
  void rorxq(Register dst, Register src, int imm8);
#endif
  void sahf();
  void sarl(Register dst, int imm8);
  void sarl(Register dst);
  void sarq(Register dst, int imm8);
  void sarq(Register dst);
  void sbbl(Address dst, int32_t imm32);
  void sbbl(Register dst, int32_t imm32);
  void sbbl(Register dst, Address src);
  void sbbl(Register dst, Register src);
  void sbbq(Address dst, int32_t imm32);
  void sbbq(Register dst, int32_t imm32);
  void sbbq(Register dst, Address src);
  void sbbq(Register dst, Register src);
  void setb(Condition cc, Register dst);
  void shldl(Register dst, Register src);
  void shll(Register dst, int imm8);
  void shll(Register dst);
  void shlq(Register dst, int imm8);
  void shlq(Register dst);
  void shrdl(Register dst, Register src);
  void shrl(Register dst, int imm8);
  void shrl(Register dst);
  void shrq(Register dst, int imm8);
  void shrq(Register dst);
  void smovl(); // QQQ generic?
  void sqrtsd(XMMRegister dst, Address src);
  void sqrtsd(XMMRegister dst, XMMRegister src);
  void sqrtss(XMMRegister dst, Address src);
  void sqrtss(XMMRegister dst, XMMRegister src);
  void std();
  void stmxcsr( Address dst );
  void subl(Address dst, int32_t imm32);
  void subl(Address dst, Register src);
  void subl(Register dst, int32_t imm32);
  void subl(Register dst, Address src);
  void subl(Register dst, Register src);
  void subq(Address dst, int32_t imm32);
  void subq(Address dst, Register src);
  void subq(Register dst, int32_t imm32);
  void subq(Register dst, Address src);
  void subq(Register dst, Register src);
  void subl_imm32(Register dst, int32_t imm32);
  void subq_imm32(Register dst, int32_t imm32);
  void subsd(XMMRegister dst, Address src);
  void subsd(XMMRegister dst, XMMRegister src);
  void subss(XMMRegister dst, Address src);
  void subss(XMMRegister dst, XMMRegister src);
  void testb(Register dst, int imm8);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, Register src);
  void testl(Register dst, Address src);
  void testq(Register dst, int32_t imm32);
  void testq(Register dst, Register src);
  void tzcntl(Register dst, Register src);
  void tzcntq(Register dst, Register src);
  void ucomisd(XMMRegister dst, Address src);
  void ucomisd(XMMRegister dst, XMMRegister src);
  void ucomiss(XMMRegister dst, Address src);
  void ucomiss(XMMRegister dst, XMMRegister src);
  void xabort(int8_t imm8);
  void xaddl(Address dst, Register src);
  void xaddq(Address dst, Register src);
  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
  void xchgl(Register reg, Address adr);
  void xchgl(Register dst, Register src);
  void xchgq(Register reg, Address adr);
  void xchgq(Register dst, Register src);
  void xend();
  void xgetbv();
  void xorl(Register dst, int32_t imm32);
  void xorl(Register dst, Address src);
  void xorl(Register dst, Register src);
  void xorq(Register dst, Address src);
  void xorq(Register dst, Register src);
  void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vaddss(XMMRegister dst, XMMRegister nds, Address src);
  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vdivss(XMMRegister dst, XMMRegister nds, Address src);
  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vmulss(XMMRegister dst, XMMRegister nds, Address src);
  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vsubss(XMMRegister dst, XMMRegister nds, Address src);
  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void addpd(XMMRegister dst, XMMRegister src);
  void addps(XMMRegister dst, XMMRegister src);
  void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void subpd(XMMRegister dst, XMMRegister src);
  void subps(XMMRegister dst, XMMRegister src);
  void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void mulpd(XMMRegister dst, XMMRegister src);
  void mulps(XMMRegister dst, XMMRegister src);
  void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void divpd(XMMRegister dst, XMMRegister src);
  void divps(XMMRegister dst, XMMRegister src);
  void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void andpd(XMMRegister dst, XMMRegister src);
  void andps(XMMRegister dst, XMMRegister src);
  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, XMMRegister src);
  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void paddb(XMMRegister dst, XMMRegister src);
  void paddw(XMMRegister dst, XMMRegister src);
  void paddd(XMMRegister dst, XMMRegister src);
  void paddq(XMMRegister dst, XMMRegister src);
  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void psubb(XMMRegister dst, XMMRegister src);
  void psubw(XMMRegister dst, XMMRegister src);
  void psubd(XMMRegister dst, XMMRegister src);
  void psubq(XMMRegister dst, XMMRegister src);
  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void pmullw(XMMRegister dst, XMMRegister src);
  void pmulld(XMMRegister dst, XMMRegister src);
  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void psllw(XMMRegister dst, int shift);
  void pslld(XMMRegister dst, int shift);
  void psllq(XMMRegister dst, int shift);
  void psllw(XMMRegister dst, XMMRegister shift);
  void pslld(XMMRegister dst, XMMRegister shift);
  void psllq(XMMRegister dst, XMMRegister shift);
  void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void psrlw(XMMRegister dst, int shift);
  void psrld(XMMRegister dst, int shift);
  void psrlq(XMMRegister dst, int shift);
  void psrlw(XMMRegister dst, XMMRegister shift);
  void psrld(XMMRegister dst, XMMRegister shift);
  void psrlq(XMMRegister dst, XMMRegister shift);
  void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void psraw(XMMRegister dst, int shift);
  void psrad(XMMRegister dst, int shift);
  void psraw(XMMRegister dst, XMMRegister shift);
  void psrad(XMMRegister dst, XMMRegister shift);
  void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256);
  void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256);
  void pand(XMMRegister dst, XMMRegister src);
  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void por(XMMRegister dst, XMMRegister src);
  void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void pxor(XMMRegister dst, XMMRegister src);
  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256);
  void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
  void vinsertf128h(XMMRegister dst, Address src);
  void vinserti128h(XMMRegister dst, Address src);
  void vextractf128h(Address dst, XMMRegister src);
  void vextracti128h(Address dst, XMMRegister src);
  void vpbroadcastd(XMMRegister dst, XMMRegister src);
  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
  void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
  void vzeroupper();
 protected:
  void andpd(XMMRegister dst, Address src);
  void andps(XMMRegister dst, Address src);
  void xorpd(XMMRegister dst, Address src);
  void xorps(XMMRegister dst, Address src);
};
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
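// A minimal usage sketch (not part of the dumped sources): instructions are emitted
// into a CodeBuffer through the methods declared above.  The helper name is
// illustrative; the CodeBuffer(name, code_size, locs_size) constructor, ResourceMark
// and the global register names rax/rbx follow the usual HotSpot conventions, and the
// buffer size here is arbitrary.
static void assembler_usage_sketch() {
  ResourceMark rm;
  CodeBuffer buffer("assembler_sketch", 128, 0);   // name, insts size, locs size
  Assembler a(&buffer);
  a.movl(rax, 42);          // mov eax, 42
  a.addl(rax, rbx);         // add eax, ebx
  a.ret(0);                 // ret
  // buffer.insts_end() - buffer.insts_begin() now gives the number of bytes emitted.
}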
C:\hotspot-69087d08d473\src\cpu\x86\vm/assembler_x86.inline.hpp
#ifndef CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
#define CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#ifndef _LP64
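// In 32-bit builds there are no REX prefixes, so the prefix helpers below are no-ops
// and the *_and_encode variants simply pack the reg and rm fields of the ModRM byte
// (dst_enc << 3 | src_enc).  The 64-bit versions are defined out of line.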
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; }
inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; }
inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }
inline void Assembler::prefix(Register reg) {}
inline void Assembler::prefix(Address adr) {}
inline void Assembler::prefixq(Address adr) {}
inline void Assembler::prefix(Address adr, Register reg,  bool byteinst) {}
inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
#endif // _LP64
#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/bytecodeInterpreter_x86.cpp
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef TARGET_ARCH_x86
# include "interp_masm_x86.hpp"
#endif
#ifdef CC_INTERP
#endif // CC_INTERP (all)
C:\hotspot-69087d08d473\src\cpu\x86\vm/bytecodeInterpreter_x86.hpp
#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
#define CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
private:
    interpreterState _self_link;          /*  Previous interpreter state  */ /* sometimes points to self??? */
    address   _result_handler;            /* temp for saving native result handler */
    intptr_t* _sender_sp;                 /* sender's sp before stack (locals) extension */
    address   _extra_junk1;               /* temp to save on recompiles */
    address   _extra_junk2;               /* temp to save on recompiles */
    address   _extra_junk3;               /* temp to save on recompiles */
    address   _extra_junk4;               /* temp to save on recompiles */
    address   _extra_junk5;               /* temp to save on recompiles */
    address   _extra_junk6;               /* temp to save on recompiles */
public:
inline intptr_t* sender_sp() {
  return _sender_sp;
}
#define SET_LAST_JAVA_FRAME()
#define RESET_LAST_JAVA_FRAME()
#undef STACK_INT
#undef STACK_FLOAT
#undef STACK_ADDR
#undef STACK_OBJECT
#undef STACK_DOUBLE
#undef STACK_LONG
#define GET_STACK_SLOT(offset)    (*((intptr_t*) &topOfStack[-(offset)]))
#define STACK_SLOT(offset)    ((address) &topOfStack[-(offset)])
#define STACK_ADDR(offset)    (*((address *) &topOfStack[-(offset)]))
#define STACK_INT(offset)     (*((jint*) &topOfStack[-(offset)]))
#define STACK_FLOAT(offset)   (*((jfloat *) &topOfStack[-(offset)]))
#define STACK_OBJECT(offset)  (*((oop *) &topOfStack [-(offset)]))
#define STACK_DOUBLE(offset)  (((VMJavaVal64*) &topOfStack[-(offset)])->d)
#define STACK_LONG(offset)    (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
#define SET_STACK_SLOT(value, offset)   (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
#define SET_STACK_ADDR(value, offset)   (*((address *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_INT(value, offset)    (*((jint *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_FLOAT(value, offset)  (*((jfloat *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d =  \
                                                 ((VMJavaVal64*)(addr))->d)
#define SET_STACK_LONG(value, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
#define SET_STACK_LONG_FROM_ADDR(addr, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l =  \
                                                 ((VMJavaVal64*)(addr))->l)
#define LOCALS_SLOT(offset)    ((intptr_t*)&locals[-(offset)])
#define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
#define LOCALS_INT(offset)     ((jint)(locals[-(offset)]))
#define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
#define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
#define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
#define SET_LOCALS_SLOT(value, offset)    (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
#define SET_LOCALS_ADDR(value, offset)    (*((address *)&locals[-(offset)]) = (value))
#define SET_LOCALS_INT(value, offset)     (*((jint *)&locals[-(offset)]) = (value))
#define SET_LOCALS_FLOAT(value, offset)   (*((jfloat *)&locals[-(offset)]) = (value))
#define SET_LOCALS_OBJECT(value, offset)  (*((oop *)&locals[-(offset)]) = (value))
#define SET_LOCALS_DOUBLE(value, offset)  (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
#define SET_LOCALS_LONG(value, offset)    (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
                                                  ((VMJavaVal64*)(addr))->d)
#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
                                                ((VMJavaVal64*)(addr))->l)
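// Example of the pattern these macros support (a sketch of how bytecodeInterpreter.cpp
// uses them): for an integer add the two operands sit at offsets -1 and -2 relative to
// topOfStack and the result replaces the lower one:
//   SET_STACK_INT(STACK_INT(-2) + STACK_INT(-1), -2);
// Two-slot values (long/double) go through VMJavaVal64, and in the locals array they
// are addressed at -(offset + 1) so the pair occupies two consecutive slots.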
#endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
C:\hotspot-69087d08d473\src\cpu\x86\vm/bytecodeInterpreter_x86.inline.hpp
#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
#define CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); }
inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
  return ( op1 < op2 ? -1 :
               op1 > op2 ? 1 :
                   op1 == op2 ? 0 :
                       (direction == -1 || direction == 1) ? direction : 0);
}
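// The 'direction' argument supplies the result for the unordered (NaN) case: -1 gives
// fcmpl semantics and +1 gives fcmpg semantics, matching the two float-compare bytecodes.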
inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
  to[0] = from[0]; to[1] = from[1];
}
inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
  return op1 + op2;
}
inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
  return op1 & op2;
}
inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
  return op1 / op2;
}
inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
  return op1 * op2;
}
inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
  return op1 | op2;
}
inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
  return op1 - op2;
}
inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
  return op1 ^ op2;
}
inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
  return op1 % op2;
}
inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
  return ((unsigned long long) op1) >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
  return op1 >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
  return op1 << (op2 & 0x3F);
}
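// Shift counts for the 64-bit shifts are masked to the low six bits (op2 & 0x3F),
// matching the JVM specification for lshl, lshr and lushr.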
inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
  return -op;
}
inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
  return ~op;
}
inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
  return (op <= 0);
}
inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
  return (op >= 0);
}
