#endif
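// Allocates a new instance of 'klass'. When the klass is loaded and its
// layout allows it, an inline fast-path allocation is emitted with a
// NewInstanceStub as the slow path; otherwise the allocation is routed
// unconditionally to the runtime stub.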
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
    assert(klass->is_loaded(), "must be loaded");
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}
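// Small helpers used below (mainly by arraycopy_helper): pattern-match
// constant operands, recover an array klass from a ciType, and compute the
// common declared type of a Phi's operands (NULL if they disagree).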
static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}
static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}
static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}
static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for (int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}
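// Determines which runtime checks (null, range, positivity, type, overlap,
// alignment) this System.arraycopy call site still needs, clearing the
// corresponding LIR_OpArrayCopy flags, and derives the best statically known
// array type into *expected_typep.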
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }
  int flags = LIR_OpArrayCopy::all_flags;
  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;
  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;
  if (expected_type != NULL) {
    Value length_limit = NULL;
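    // Recognize the IfOp shape the graph builder produces for
    //   n = (v > limit) ? limit : v     (i.e. Math.min(v, limit))
    // so the limiting value can be compared against a NewArray length below.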
    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }
    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    flags &= ~LIR_OpArrayCopy::overlapping;
  }
  if (src == dst) {
    // Copy within the same array: no store type check can be needed.
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = expected_type;
}
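// round_item: with x87 floating point (UseSSE < 1) a single-precision result
// lives in an 80-bit FPU register; forcing it to start in memory rounds it
// to 32 bits as the JLS requires.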
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");
  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
  __ move(value, tmp);
  return tmp;
}
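// Branch profiling: bumps the taken/not_taken counter in the MDO. The
// counter offset is selected with a cmove on the branch condition, so the
// update itself is branch-free.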
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
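// Moves the current value into the operand of the successor's phi, creating
// the operand lazily for constants and locals. The PhiResolver schedules the
// moves so that parallel phi moves do not clobber each other.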
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;
      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }
      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }
      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
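  // Leave slack before vreg_max: the bailout is only checked periodically,
  // so a few more registers can still be handed out after it is raised.
  // Wrapping to vreg_base keeps the numbers valid until the bailout is seen.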
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }
  set_result(x, reg);
  return reg;
}
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}
void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));
  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
void LIRGenerator::do_Phi(Phi* x) {
  ShouldNotReachHere();
}
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}
void LIRGenerator::do_Local(Local* x) {
  operand_for_instruction(x);
}
void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}
void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }
  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);
    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}
void LIRGenerator::do_Reference_get(Intrinsic* x) {
  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem reference(x->argument_at(0), this);
  reference.load_item();
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);
  LIR_Opr result = rlock_result(x);
  __ load(referent_field_adr, result, info);
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }
  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}
void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);
  set_no_result(x);
}
LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() != NULL || x->as_Local() != NULL, "only for Phi and Local");
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}
Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}
Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}
void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}
bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}
LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}
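// Reuses a register that already holds an identical constant. Floats and
// doubles are compared by bit pattern, which distinguishes -0.0 from +0.0
// and keeps NaN payloads from being conflated.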
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }
  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      break;
    default:
      ShouldNotReachHere();
  }
}
void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr,  new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr,  new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      break;
    default:
      ShouldNotReachHere();
  }
}
#if INCLUDE_ALL_GCS
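// G1 SATB pre-barrier: while concurrent marking is active (tested via the
// per-thread flag), the value about to be overwritten (pre_val) must be
// recorded to preserve the snapshot-at-the-beginning invariant. The flag
// test is inline; the enqueue itself lives in G1PreBarrierStub.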
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  LIR_PatchCode pre_val_patch_code = lir_patch_none;
  CodeStub* slow;
  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
    if (patch)
      pre_val_patch_code = lir_patch_normal;
    pre_val = new_register(T_OBJECT);
    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");
    slow = new G1PreBarrierStub(pre_val);
  }
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
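// G1 post-barrier: a card needs dirtying only for inter-region stores of
// non-null values. The region test is
//   ((addr ^ new_val) >> LogOfHRGrainBytes) != 0
// and the actual card marking happens in G1PostBarrierStub.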
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;
  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }
  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");
  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}
#endif // INCLUDE_ALL_GCS
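// Generational card-table post-barrier: dirty the card covering the updated
// address, i.e. byte_map_base[addr >> card_shift] = 0 (dirty).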
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
  CardTableModRef_post_barrier_helper(addr, card_table_base);
#else
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
    __ membar_storestore();
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
              new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
              new LIR_Address(tmp, load_constant(card_table_base),
                              T_BYTE));
  }
#endif
}
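// Field store. Ordering for a volatile oop store on MP is:
//   membar_release; pre-barrier; store; post-barrier; membar.
// Patching (field not yet resolved) forces the slow, patchable address form.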
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  LIRItem object(x->obj(), this);
  LIRItem value(x->value(),  this);
  object.load_item();
  if (is_volatile || needs_patching) {
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else  {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }
  set_no_result(x);
#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ?  "static" : "field", x->printable_bci());
  }
#endif
  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }
  LIR_Address* address;
  if (needs_patching) {
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }
  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }
  if (is_oop) {
    pre_barrier(LIR_OprFact::address(address),
                LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load*/,
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }
  if (is_volatile && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }
  if (is_oop) {
    post_barrier(object.result(), value.result());
  }
  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  LIRItem object(x->obj(), this);
  object.load_item();
#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ?  "static" : "field", x->printable_bci());
  }
#endif
  bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()) ||
       stress_deopt)) {
    LIR_Opr obj = object.result();
    if (stress_deopt) {
      obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
    }
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }
  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }
  if (is_volatile && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }
  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();
  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    __ move(index.result(), result);
  }
}
void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
    if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(info));
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
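// Array element load. The range check compares against either the explicit
// length value (when available) or the array's length field; the unsigned
// belowEqual compare also catches negative indices. Under
// StressLoopInvariantCodeMotion a deoptimizing check is forced instead.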
void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = x->compute_needs_range_check();
  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    index.dont_load_item();
  } else {
    index.load_item();
  }
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      null_check_info = NULL;
    }
  }
  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}
void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  set_result(x, value.result());
}
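// Throw. If the exact (or declared) exception type proves that no local
// handler can catch it, emit an unwind straight to the caller instead of the
// in-method exception dispatch.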
void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());
#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address(), T_INT);
  }
#endif
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    unwind = true;
  } else {
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
  }
  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
    unwind = false;
  }
  __ move(exception_opr, exceptionOopOpr());
  if (unwind) {
    __ unwind_exception(exceptionOopOpr());
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}
void LIRGenerator::do_RoundFP(RoundFP* x) {
  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);
  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }
  LIR_Opr reg = rlock_result(x, x->basic_type());
  int   log2_scale = 0;
  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }
  assert(!x->has_index() || idx.value() == x->index(), "should match");
  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();
#ifndef _LP64
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      LIR_Opr long_index_op = index_op;
      if (index_op->is_constant()) {
        long_index_op = new_register(T_LONG);
        __ move(index_op, long_index_op);
      }
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, long_index_op, index_op);
    } else {
      assert(x->index()->type()->tag() == intTag, "must be");
    }
  }
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      if (!index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, idx.result(), index_op);
      }
    } else {
      assert(index_op->type() == T_LONG, "must be");
      if (index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ move(idx.result(), index_op);
      }
    }
  }
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
#endif
  BasicType dst_type = x->basic_type();
  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    assert(index_op->type() == T_INT, "only int constants supported");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#if defined(X86) || defined(AARCH64)
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#elif defined(GENERATE_ADDRESS_IS_PREFERRED)
    addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }
  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    if (dst_type == T_OBJECT && x->is_wide()) {
      __ move_wide(addr, reg);
    } else {
      __ move(addr, reg);
    }
  }
}
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int  log2_scale = 0;
  BasicType type = x->basic_type();
  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }
  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);
  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }
  if (type == T_BYTE || type == T_BOOLEAN) {
    value.load_byte_item();
  } else {
    value.load_item();
  }
  set_no_result(x);
  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();
#ifdef GENERATE_ADDRESS_IS_PREFERRED
  LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
#else
#ifndef _LP64
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, idx.result(), index_op);
    }
  }
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
#else
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      index_op = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, idx.result(), index_op);
    }
  }
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
#endif
  if (log2_scale != 0) {
    LIR_Opr tmp = new_pointer_register();
    if (TwoOperandLIRForm) {
      __ move(index_op, tmp);
      index_op = tmp;
    }
    __ shift_left(index_op, log2_scale, tmp);
    if (!TwoOperandLIRForm) {
      index_op = tmp;
    }
  }
  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
#endif // !GENERATE_ADDRESS_IS_PREFERRED
  __ move(value.result(), addr);
}
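// Unsafe.getObject. Under G1 this may read Reference.referent, which must be
// treated like Reference.get(): the loaded value needs a SATB pre-barrier.
// The guards below skip the barrier whenever the offset, the source type, or
// a constant source proves the referent field cannot be involved.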
void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  off.load_item();
  src.load_item();
  LIR_Opr value = rlock_result(x, x->basic_type());
  get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
#if INCLUDE_ALL_GCS
  if (UseG1GC && type == T_OBJECT) {
    bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
    bool gen_offset_check = true;    // Assume we need to generate the offset guard.
    bool gen_source_check = true;    // Assume we need to check the src object for null.
    bool gen_type_check = true;      // Assume we need to check the reference_type.
    if (off.is_constant()) {
      jlong off_con = (off.type()->is_int() ?
                        (jlong) off.get_jint_constant() :
                        off.get_jlong_constant());
      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
        gen_pre_barrier = false;
      } else {
        gen_offset_check = false;
      }
    }
    if (gen_pre_barrier && src.type()->is_array()) {
      gen_pre_barrier = false;
    }
    if (gen_pre_barrier) {
      if (src.is_constant()) {
        ciObject* src_con = src.get_jobject_constant();
        guarantee(src_con != NULL, "no source constant");
        if (src_con->is_null_object()) {
          gen_pre_barrier = false;
        } else {
          gen_source_check = false;
        }
      }
    }
    if (gen_pre_barrier && !PatchALot) {
      ciType* type = src.value()->declared_type();
      if ((type != NULL) && type->is_loaded()) {
        if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
          gen_type_check = false;
        } else if (type->is_klass() &&
                   !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
          gen_pre_barrier = false;
        }
      }
    }
    if (gen_pre_barrier) {
      LabelObj* Lcont = new LabelObj();
      LIR_Opr src_reg = new_register(T_OBJECT);
      __ move(src.result(), src_reg);
      if (gen_offset_check) {
        LIR_Opr referent_off;
        if (off.type()->is_int()) {
          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
        } else {
          assert(off.type()->is_long(), "what else?");
          referent_off = new_register(T_LONG);
          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
        }
        __ cmp(lir_cond_notEqual, off.result(), referent_off);
        __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
      }
      if (gen_source_check) {
        __ cmp(lir_cond_equal, src_reg, LIR_OprFact::oopConst(NULL));
        __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
      }
      LIR_Opr src_klass = new_register(T_METADATA);
      if (gen_type_check) {
        __ move(new LIR_Address(src_reg, oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
        LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
        LIR_Opr reference_type = new_register(T_INT);
        __ move(reference_type_addr, reference_type);
        __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
        __ branch(lir_cond_equal, T_INT, Lcont->label());
      }
      {
        pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
                    value  /* pre_val */,
                    false  /* do_load */,
                    false  /* patch */,
                    NULL   /* info */);
      }
      __ branch_destination(Lcont->label());
    }
  }
#endif // INCLUDE_ALL_GCS
  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
}
void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);
  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();
  set_no_result(x);
  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
  if (x->is_volatile() && os::is_MP()) __ membar();
}
void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  src.load_item();
  if (off.is_constant() && can_inline_as_constant(x->offset())) {
    off.dont_load_item();
  } else {
    off.load_item();
  }
  set_no_result(x);
  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  __ prefetch(addr, is_store);
}
void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}
void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();
  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, T_INT, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}
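// Collapses a tableswitch into [low_key, high_key] -> successor ranges,
// merging consecutive keys that share a successor and dropping ranges that
// simply fall through to the default successor.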
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        range->set_high_key(key);
      } else {
        if (sux != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range)  res->append(range);
  }
  return res;
}
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        range->set_high_key(new_key);
      } else {
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range)  res->append(range);
  }
  return res;
}
void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);
  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }
  move_to_phi(x->state());
  int lo_key = x->lo_key();
  int len = x->length();
  assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}
void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);
  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }
  move_to_phi(x->state());
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}
void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);
  if (block()->next()->as_OsrEntry()) {
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }
  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();
    CodeEmitInfo* info = state_for(x, state);
    increment_backedge_counter(info, x->profiled_bci());
    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }
  if (x->should_profile()) {
    ciMethod* method = x->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(x->profiled_bci());
    assert(data != NULL, "must have profiling data");
    int offset;
    if (x->direction() == Goto::taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    } else if (x->direction() == Goto::not_taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    } else {
      assert(data->is_JumpData(), "need JumpData for branches");
      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    increment_counter(new LIR_Address(md_reg, offset,
                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
  }
  move_to_phi(x->state());
  __ jump(x->default_sux());
}
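// Type profiling for obj. profiled_k carries the MDO's current state (the
// null_seen/type_unknown bits plus a klass). The compiler tries to prove an
// exact klass statically, sharpening through the signature type at the call
// and the callee's signature via exact_klass() and CHA; if it succeeds, the
// result is returned so the caller can update the MDO at compile time, and
// the emitted profile_type op handles whatever remains dynamic.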
ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
                                    Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
                                    ciKlass* callee_signature_k) {
  ciKlass* result = NULL;
  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
  if (!do_null && !do_update) {
    return result;
  }
  ciKlass* exact_klass = NULL;
  Compilation* comp = Compilation::current();
  if (do_update) {
    ciType* type = obj->exact_type();
    if (type == NULL) {
      type = obj->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == NULL || type->is_klass(), "type should be class");
    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }
  if (!do_null && !do_update) {
    return result;
  }
  ciKlass* exact_signature_k = NULL;
  if (do_update) {
    exact_signature_k = signature_at_call_k->exact_klass();
    if (exact_signature_k == NULL) {
      exact_signature_k = comp->cha_exact_type(signature_at_call_k);
    } else {
      result = exact_signature_k;
      profiled_k = ciTypeEntries::with_status(result, profiled_k);
    }
    if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
      exact_klass = exact_signature_k;
    }
    if (callee_signature_k != NULL &&
        callee_signature_k != signature_at_call_k) {
      ciKlass* improved_klass = callee_signature_k->exact_klass();
      if (improved_klass == NULL) {
        improved_klass = comp->cha_exact_type(callee_signature_k);
      }
      if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
        exact_klass = improved_klass;
      }
    }
    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }
  if (!do_null && !do_update) {
    return result;
  }
  if (mdp == LIR_OprFact::illegalOpr) {
    mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    if (md_base_offset != 0) {
      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
      mdp = new_pointer_register();
      __ leal(LIR_OprFact::address(base_type_address), mdp);
    }
  }
  LIRItem value(obj, this);
  value.load_item();
  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
  return result;
}
void LIRGenerator::profile_parameters(Base* x) {
  if (compilation()->profile_parameters()) {
    CallingConvention* args = compilation()->frame_map()->incoming_arguments();
    ciMethodData* md = scope()->method()->method_data_or_null();
    assert(md != NULL, "Sanity");
    if (md->parameters_type_data() != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
      LIR_Opr mdp = LIR_OprFact::illegalOpr;
      for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
        LIR_Opr src = args->at(i);
        assert(!src->is_illegal(), "check");
        BasicType t = src->type();
        if (t == T_OBJECT || t == T_ARRAY) {
          intptr_t profiled_k = parameters->type(j);
          Local* local = x->state()->local_at(java_index)->as_Local();
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
          if (exact != NULL) {
            md->set_parameter_type(j, exact);
          }
          j++;
        }
        java_index += type2size[t];
      }
    }
  }
}
void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
    }
    LIR_Opr dest = new_register(t);
    __ move(src, dest);
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
#ifndef __SOFTFP__
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
#endif // __SOFTFP__
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }
  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);
      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }
  __ jump(x->default_sux());
}
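// A minimal standalone sketch (plain C++, not part of these sources) of the
// two-cursor walk in do_Base above: i indexes incoming machine locations
// while java_index indexes Java local slots, and longs/doubles consume two
// slots.  The size table here is an illustrative stand-in for type2size.
#if 0
#include <cstdio>

enum SketchType { S_INT, S_LONG, S_FLOAT, S_DOUBLE, S_OBJECT };
static const int sketch_type2size[] = { 1, 2, 1, 2, 1 };

int main() {
  SketchType args[] = { S_INT, S_LONG, S_OBJECT };
  int java_index = 0;
  for (int i = 0; i < 3; i++) {
    printf("arg %d -> local slot %d\n", i, java_index);
    java_index += sketch_type2size[args[i]];  // a long advances by two slots
  }
  return 0;
}
#endif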
void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
      } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }
  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
  }
  return argument_items;
}
void LIRGenerator::do_Invoke(Invoke* x) {
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  invoke_load_arguments(x, args, arg_list);
  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
  ciMethod* target = x->target();
  bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
                                  target->is_method_handle_intrinsic() ||
                                  target->is_compiled_lambda_form());
  if (is_method_handle_invoke) {
    info->set_is_method_handle_invoke(true);
    if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
      __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
    }
  }
  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(target, result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(target, receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(target, receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    case Bytecodes::_invokedynamic: {
      __ call_dynamic(target, receiver, result_register,
                      SharedRuntime::get_resolve_static_call_stub(),
                      arg_list, info);
      break;
    }
    default:
      fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
      break;
  }
  if (is_method_handle_invoke
      && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
    __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
  }
  if (x->type()->is_float() || x->type()->is_double()) {
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }
  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}
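// A minimal standalone sketch (plain C++, not part of these sources) of the
// vtable offset arithmetic used for the resolved invokevirtual case above.
// The start offset and entry size are in words; the concrete numbers below
// are illustrative assumptions, not the VM's actual layout.
#if 0
#include <cstdio>

int main() {
  const int wordSize               = 8;   // assumed 64-bit target
  const int vtable_start_offset    = 48;  // words from the klass base (assumed)
  const int vtable_entry_size      = 1;   // words per entry (assumed)
  const int method_offset_in_bytes = 0;   // Method* slot inside an entry (assumed)
  const int vtable_index           = 5;
  int entry_offset  = vtable_start_offset + vtable_index * vtable_entry_size;
  int vtable_offset = entry_offset * wordSize + method_offset_in_bytes;
  printf("Method* is loaded from [klass + %d]\n", vtable_offset);
  return 0;
}
#endif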
void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value       (x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}
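// The four intrinsics handled above reinterpret bits between int/float and
// long/double; forcing the value through a spill slot performs that
// reinterpretation without any numeric conversion.  A minimal standalone
// sketch of the same semantics in portable C++ (memcpy is the well-defined
// way to type-pun):
#if 0
#include <cstdint>
#include <cstring>
#include <cstdio>

int32_t float_to_raw_int_bits(float f) {
  int32_t bits;
  memcpy(&bits, &f, sizeof(bits));  // copy the bits, change only the type
  return bits;
}

float int_bits_to_float(int32_t bits) {
  float f;
  memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  printf("0x%x\n", (unsigned)float_to_raw_int_bits(1.0f));  // prints 0x3f800000
  return 0;
}
#endif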
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);
  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}
#ifdef JFR_HAVE_INTRINSICS
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x);
  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
  assert(info != NULL, "must have info");
  LIRItem arg(x->argument_at(0), this);
  arg.load_item();
  LIR_Opr klass = new_register(T_METADATA);
  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
  LIR_Opr id = new_register(T_LONG);
  ByteSize offset = KLASS_TRACE_ID_OFFSET;
  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
  __ move(trace_id_addr, id);
  __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
  __ store(id, trace_id_addr);
#ifdef TRACE_ID_META_BITS
  __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
#endif
#ifdef TRACE_ID_SHIFT
  __ unsigned_shift_right(id, TRACE_ID_SHIFT, id);
#endif
  __ move(id, rlock_result(x));
}
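// A minimal standalone sketch (plain C++, not part of these sources) of the
// bit discipline above: tag the klass' trace id as in-use and write it back,
// then strip the metadata bits and shift before returning the id to Java.
// META_BITS and SHIFT are illustrative assumptions; the real values come
// from the JFR headers.
#if 0
#include <cstdint>

int64_t class_id(int64_t* trace_id_slot) {
  const int64_t META_BITS = 0x3;  // assumed placeholder
  const int     SHIFT     = 0;    // assumed placeholder
  int64_t id = *trace_id_slot;
  id |= 0x01;                     // mark the klass as used by JFR
  *trace_id_slot = id;            // store the tag back
  id &= ~META_BITS;               // drop metadata bits from the returned value
  return (int64_t)((uint64_t)id >> SHIFT);  // unsigned shift, as emitted above
}
#endif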
void LIRGenerator::do_getEventWriter(Intrinsic* x) {
  LabelObj* L_end = new LabelObj();
  LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
                                           in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
                                           T_OBJECT);
  LIR_Opr result = rlock_result(x);
  __ move_wide(jobj_addr, result);
  __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
  __ branch(lir_cond_equal, T_OBJECT, L_end->label());
  __ move_wide(new LIR_Address(result, T_OBJECT), result);
  __ branch_destination(L_end->label());
}
#endif
void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
  assert(x->number_of_arguments() == expected_arguments, "wrong type");
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
#ifdef TRACE_HAVE_INTRINSICS
void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
  LIR_Opr thread = getThreadPointer();
  LIR_Opr osthread = new_pointer_register();
  __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
  size_t thread_id_size = OSThread::thread_id_size();
  if (thread_id_size == (size_t) BytesPerLong) {
    LIR_Opr id = new_register(T_LONG);
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
    __ convert(Bytecodes::_l2i, id, rlock_result(x));
  } else if (thread_id_size == (size_t) BytesPerInt) {
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
  } else {
    ShouldNotReachHere();
  }
}
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x);
  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
  BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
  assert(info != NULL, "must have info");
  LIRItem arg(x->argument_at(1), this);
  arg.load_item();
  LIR_Opr klass = new_pointer_register();
  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
  LIR_Opr id = new_register(T_LONG);
  ByteSize offset = TRACE_ID_OFFSET;
  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
  __ move(trace_id_addr, id);
  __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
  __ store(id, trace_id_addr);
  __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
  __ move(id, rlock_result(x));
}
#endif
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble    :
  case vmIntrinsics::_floatToRawIntBits   : {
    do_FPIntrinsics(x);
    break;
  }
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_getClassId:
    do_ClassIDIntrinsic(x);
    break;
  case vmIntrinsics::_getEventWriter:
    do_getEventWriter(x);
    break;
  case vmIntrinsics::_counterTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), 0, x);
    break;
#endif
  case vmIntrinsics::_currentTimeMillis:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
    break;
  case vmIntrinsics::_nanoTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
    break;
  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
  case vmIntrinsics::_getClass:       do_getClass(x);      break;
  case vmIntrinsics::_currentThread:  do_currentThread(x); break;
  case vmIntrinsics::_dlog:           // fall through
  case vmIntrinsics::_dlog10:         // fall through
  case vmIntrinsics::_dabs:           // fall through
  case vmIntrinsics::_dsqrt:          // fall through
  case vmIntrinsics::_dtan:           // fall through
  case vmIntrinsics::_dsin :          // fall through
  case vmIntrinsics::_dcos :          // fall through
  case vmIntrinsics::_dexp :          // fall through
  case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
  case vmIntrinsics::_compareAndSwapObject:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSwapInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSwapLong:
    do_CompareAndSwap(x, longType);
    break;
  case vmIntrinsics::_loadFence :
    if (os::is_MP()) __ membar_acquire();
    break;
  case vmIntrinsics::_storeFence:
    if (os::is_MP()) __ membar_release();
    break;
  case vmIntrinsics::_fullFence :
    if (os::is_MP()) __ membar();
    break;
  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    do_update_CRC32(x);
    break;
  default: ShouldNotReachHere(); break;
  }
}
void LIRGenerator::profile_arguments(ProfileCall* x) {
  if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    ciProfileData* data = md->bci_to_data(bci);
    if (data != NULL) {
      if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
          (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
        ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
        int base_offset = md->byte_offset_of_slot(data, extra);
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        int start = 0;
        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
        if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
          start = 1;
        }
        ciSignature* callee_signature = x->callee()->signature();
        bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
        bool ignored_will_link;
        ciSignature* signature_at_call = NULL;
        x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
        ciSignatureStream signature_at_call_stream(signature_at_call);
        for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
          int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
          ciKlass* exact = profile_type(md, base_offset, off,
              args->type(i), x->profiled_arg_at(i+start), mdp,
              !x->arg_needs_null_check(i+start),
              signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
          if (exact != NULL) {
            md->set_argument_type(bci, i, exact);
          }
        }
      } else {
#ifdef ASSERT
        Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
        int n = x->nb_profiled_args();
        assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
            (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
            "only at JSR292 bytecodes");
#endif
      }
    }
  }
}
void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
  if (compilation()->profile_parameters() && x->inlined()) {
    ciMethodData* md = x->callee()->method_data_or_null();
    if (md != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      if (parameters_type_data != NULL) {
        ciTypeStackSlotEntries* parameters =  parameters_type_data->parameters();
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        bool has_receiver = !x->callee()->is_static();
        ciSignature* sig = x->callee()->signature();
        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
        int i = 0; // to iterate on the Instructions
        Value arg = x->recv();
        bool not_null = false;
        int bci = x->bci_of_invoke();
        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
          i = 1;
          arg = x->profiled_arg_at(0);
          not_null = !x->arg_needs_null_check(0);
        }
        int k = 0; // to iterate on the profile data
        for (;;) {
          intptr_t profiled_k = parameters->type(k);
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
          if (exact != NULL) {
            md->set_parameter_type(k, exact);
          }
          k++;
          if (k >= parameters_type_data->number_of_parameters()) {
#ifdef ASSERT
            int extra = 0;
            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
                x->nb_profiled_args() >= TypeProfileParmsLimit &&
                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
              extra += 1;
            }
            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
#endif
            break;
          }
          arg = x->profiled_arg_at(i);
          not_null = !x->arg_needs_null_check(i);
          i++;
        }
      }
    }
  }
}
void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_METADATA);
  LIR_Opr tmp = new_pointer_register();
  if (x->nb_profiled_args() > 0) {
    profile_arguments(x);
  }
  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }
  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}
void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;
    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
        ret->type(), x->ret(), mdp,
        !x->needs_null_check(),
        signature_at_call->return_type()->as_klass(),
        x->callee()->signature()->return_type()->as_klass());
    if (exact != NULL) {
      md->set_return_type(bci, exact);
    }
  }
}
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
  }
}
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
  int freq_log = 0;
  int level = compilation()->env()->comp_level();
  if (level == CompLevel_limited_profile) {
    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  } else if (level == CompLevel_full_profile) {
    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  } else {
    ShouldNotReachHere();
  }
  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
                                                ciMethod *method, int frequency,
                                                int bci, bool backedge, bool notify) {
  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
  int level = _compilation->env()->comp_level();
  assert(level > CompLevel_simple, "Shouldn't be here");
  int offset = -1;
  LIR_Opr counter_holder = NULL;
  if (level == CompLevel_limited_profile) {
    MethodCounters* counters_adr = method->ensure_method_counters();
    if (counters_adr == NULL) {
      bailout("method counters allocation failed");
      return;
    }
    counter_holder = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
    offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
                                 MethodCounters::invocation_counter_offset());
  } else if (level == CompLevel_full_profile) {
    counter_holder = new_register(T_METADATA);
    offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
                                 MethodData::invocation_counter_offset());
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    __ metadata2reg(md->constant_encoding(), counter_holder);
  } else {
    ShouldNotReachHere();
  }
  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
  __ store(result, counter);
  if (notify) {
    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method->constant_encoding(), meth);
    __ logical_and(result, mask, result);
    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
    __ branch(lir_cond_equal, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
}
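// The notification test above relies on frequency being 2^n - 1: after the
// add, (counter & (frequency << shift)) == 0 exactly once every 2^n
// increments, so the overflow stub runs at a fixed period without any
// separate comparison counter.  A minimal standalone sketch (the real
// InvocationCounter shift and increment are simplified to 0 and 1 here):
#if 0
#include <cstdio>

int main() {
  const int frequency = (1 << 3) - 1;  // notify every 8th event
  int counter = 0;
  for (int i = 0; i < 20; i++) {
    counter += 1;                      // stands in for count_increment
    if ((counter & frequency) == 0) {
      printf("overflow notification at count %d\n", counter);
    }
  }
  return 0;
}
#endif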
void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
  if (x->pass_thread()) {
    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    args->append(getThreadPointer());
  }
  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value a = x->argument_at(i);
    LIRItem* item = new LIRItem(a, this);
    item->load_item();
    args->append(item->result());
    signature->append(as_BasicType(a->type()));
  }
  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(result, rlock_result(x));
  }
}
#ifdef ASSERT
void LIRGenerator::do_Assert(Assert *x) {
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  assert(tag == intTag, "Only integer assertions are valid!");
  xin->load_item();
  yin->dont_load_item();
  set_no_result(x);
  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
}
#endif
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
  Instruction *a = x->x();
  Instruction *b = x->y();
  if (!a || StressRangeCheckElimination) {
    assert(!b || StressRangeCheckElimination, "B must also be null");
    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);
    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();
    bool ok = false;
    switch(x->cond()) {
      case Instruction::eql: ok = (a_int == b_int); break;
      case Instruction::neq: ok = (a_int != b_int); break;
      case Instruction::lss: ok = (a_int < b_int); break;
      case Instruction::leq: ok = (a_int <= b_int); break;
      case Instruction::gtr: ok = (a_int > b_int); break;
      case Instruction::geq: ok = (a_int >= b_int); break;
      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
      default: ShouldNotReachHere();
    }
    if (ok) {
      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);
      __ jump(stub);
    }
  } else {
    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;
    assert(tag == intTag, "Only integer deoptimizations are valid!");
    xin->load_item();
    yin->dont_load_item();
    set_no_result(x);
    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();
    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);
    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), right->type(), stub);
  }
}
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  return call_runtime(&signature, &args, entry, result_type, info);
}
LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));
  return call_runtime(&signature, &args, entry, result_type, info);
}
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }
  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }
  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
void LIRGenerator::do_MemBar(MemBar* x) {
  if (os::is_MP()) {
    LIR_Code code = x->code();
    switch(code) {
      case lir_membar_acquire   : __ membar_acquire(); break;
      case lir_membar_release   : __ membar_release(); break;
      case lir_membar           : __ membar(); break;
      case lir_membar_loadload  : __ membar_loadload(); break;
      case lir_membar_storestore: __ membar_storestore(); break;
      case lir_membar_loadstore : __ membar_loadstore(); break;
      case lir_membar_storeload : __ membar_storeload(); break;
      default                   : ShouldNotReachHere(); break;
    }
  }
}
LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    LIR_Opr value_fixed = rlock_byte(T_BYTE);
    if (TwoOperandLIRForm) {
      __ move(value, value_fixed);
      __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
    } else {
      __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
    }
    LIR_Opr klass = new_register(T_METADATA);
    __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
    null_check_info = NULL;
    LIR_Opr layout = new_register(T_INT);
    __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
    int diffbit = Klass::layout_helper_boolean_diffbit();
    __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
    __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
    __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
    value = value_fixed;
  }
  return value;
}
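// byte[] and boolean[] share the T_BYTE store path, and the layout helpers
// of their array klasses differ in exactly one bit; testing that bit at run
// time decides whether the stored value must be canonicalized to 0/1.  A
// minimal standalone sketch of the selection logic (the diffbit position is
// an illustrative assumption):
#if 0
#include <cstdint>

int8_t masked_store_value(int layout_helper, int8_t value) {
  const int diffbit = 1 << 22;   // assumed bit position
  int8_t fixed = value & 1;      // canonical boolean encoding
  // the cmove above: keep the masked value only for a boolean[] receiver
  return (layout_helper & diffbit) != 0 ? fixed : value;
}
#endif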
C:\hotspot-69087d08d473\src\share\vm/c1/c1_LIRGenerator.hpp
#ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
#define SHARE_VM_C1_C1_LIRGENERATOR_HPP
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "ci/ciMethodData.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "utilities/sizes.hpp"
class LIRGenerator;
class LIREmitter;
class Invoke;
class SwitchRange;
class LIRItem;
define_array(LIRItemArray, LIRItem*)
define_stack(LIRItemList, LIRItemArray)
class SwitchRange: public CompilationResourceObj {
 private:
  int _low_key;
  int _high_key;
  BlockBegin* _sux;
 public:
  SwitchRange(int start_key, BlockBegin* sux): _low_key(start_key), _high_key(start_key), _sux(sux) {}
  void set_high_key(int key) { _high_key = key; }
  int high_key() const { return _high_key; }
  int low_key() const { return _low_key; }
  BlockBegin* sux() const { return _sux; }
};
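// A minimal standalone sketch (plain C++, not part of these sources) of how
// switch ranges are built from a SwitchRange: consecutive keys with the same
// successor collapse into one range, so a dense switch lowers to a few range
// checks instead of one compare per key.
#if 0
#include <vector>
#include <utility>

struct SketchRange { int lo, hi, sux; };

// key_sux must be sorted by key; sux is an arbitrary successor id
std::vector<SketchRange> build_ranges(const std::vector<std::pair<int,int> >& key_sux) {
  std::vector<SketchRange> out;
  for (size_t i = 0; i < key_sux.size(); i++) {
    int key = key_sux[i].first, sux = key_sux[i].second;
    if (!out.empty() && out.back().sux == sux && out.back().hi + 1 == key) {
      out.back().hi = key;  // extend the previous range
    } else {
      SketchRange r = { key, key, sux };
      out.push_back(r);
    }
  }
  return out;
}
#endif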
define_array(SwitchRangeArray, SwitchRange*)
define_stack(SwitchRangeList, SwitchRangeArray)
class ResolveNode;
define_array(NodeArray, ResolveNode*);
define_stack(NodeList, NodeArray);
class ResolveNode: public CompilationResourceObj {
 private:
  LIR_Opr    _operand;       // the source or destination
  NodeList   _destinations;  // for the operand
  bool       _assigned;      // Value assigned to this Node?
  bool       _visited;       // Node already visited?
  bool       _start_node;    // Start node already visited?
 public:
  ResolveNode(LIR_Opr operand)
    : _operand(operand)
    , _assigned(false)
    , _visited(false)
    , _start_node(false) {};
  LIR_Opr operand() const           { return _operand; }
  int no_of_destinations() const    { return _destinations.length(); }
  ResolveNode* destination_at(int i)     { return _destinations[i]; }
  bool assigned() const             { return _assigned; }
  bool visited() const              { return _visited; }
  bool start_node() const           { return _start_node; }
  void append(ResolveNode* dest)         { _destinations.append(dest); }
  void set_assigned()               { _assigned = true; }
  void set_visited()                { _visited = true; }
  void set_start_node()             { _start_node = true; }
};
class PhiResolverState: public CompilationResourceObj {
  friend class PhiResolver;
 private:
  NodeList _virtual_operands; // Nodes where the operand is a virtual register
  NodeList _other_operands;   // Nodes where the operand is not a virtual register
  NodeList _vreg_table;       // Mapping from virtual register to Node
 public:
  PhiResolverState() {}
  void reset(int max_vregs);
};
class PhiResolver: public CompilationResourceObj {
 private:
  LIRGenerator*     _gen;
  PhiResolverState& _state; // temporary state cached by LIRGenerator
  ResolveNode*   _loop;
  LIR_Opr _temp;
  NodeList& virtual_operands() { return _state._virtual_operands; }
  NodeList& other_operands()   { return _state._other_operands;   }
  NodeList& vreg_table()       { return _state._vreg_table;       }
  ResolveNode* create_node(LIR_Opr opr, bool source);
  ResolveNode* source_node(LIR_Opr opr)      { return create_node(opr, true); }
  ResolveNode* destination_node(LIR_Opr opr) { return create_node(opr, false); }
  void emit_move(LIR_Opr src, LIR_Opr dest);
  void move_to_temp(LIR_Opr src);
  void move_temp_to(LIR_Opr dest);
  void move(ResolveNode* src, ResolveNode* dest);
  LIRGenerator* gen() {
    return _gen;
  }
 public:
  PhiResolver(LIRGenerator* _lir_gen, int max_vregs);
  ~PhiResolver();
  void move(LIR_Opr src, LIR_Opr dest);
};
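// PhiResolver orders the moves that materialize phi operands: chains are
// emitted in dependency order, and a cycle is broken by routing one value
// through the temp (_temp above).  A minimal standalone sketch of breaking
// the simplest cycle, a two-register swap:
#if 0
void swap_via_temp(int& a, int& b) {
  int temp = a;  // move_to_temp(src): free the destination
  a = b;         // the remaining move is now safe
  b = temp;      // move_temp_to(dest): complete the cycle
}
#endif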
class LIRGenerator: public InstructionVisitor, public BlockClosure {
 private:
  Compilation*  _compilation;
  ciMethod*     _method;    // method that we are compiling
  PhiResolverState  _resolver_state;
  BlockBegin*   _block;
  int           _virtual_register_number;
  Values        _instruction_for_operand;
  BitMap2D      _vreg_flags; // flags which can be set on a per-vreg basis
  LIR_List*     _lir;
  BarrierSet*   _bs;
  LIRGenerator* gen() {
    return this;
  }
  void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
#ifdef ASSERT
  LIR_List* lir(const char * file, int line) const {
    _lir->set_file_and_line(file, line);
    return _lir;
  }
#endif
  LIR_List* lir() const {
    return _lir;
  }
  GrowableArray<LIR_Const*>       _constants;
  LIR_OprList                     _reg_for_constants;
  Values                          _unpinned_constants;
  friend class PhiResolver;
  void bailout(const char* msg) const            { compilation()->bailout(msg); }
  bool bailed_out() const                        { return compilation()->bailed_out(); }
  void block_do_prolog(BlockBegin* block);
  void block_do_epilog(BlockBegin* block);
  LIR_Opr rlock(Value instr);                      // lock a free register
  LIR_Opr rlock_result(Value instr);
  LIR_Opr rlock_result(Value instr, BasicType type);
  LIR_Opr rlock_byte(BasicType type);
  LIR_Opr rlock_callee_saved(BasicType type);
  LIR_Opr load_constant(Constant* x);
  LIR_Opr load_constant(LIR_Const* constant);
  LIR_Opr load_immediate(int x, BasicType type);
  void  set_result(Value x, LIR_Opr opr)           {
    assert(opr->is_valid(), "must set to valid value");
    assert(x->operand()->is_illegal(), "operand should never change");
    assert(!opr->is_register() || opr->is_virtual(), "should never set result to a physical register");
    x->set_operand(opr);
    assert(opr == x->operand(), "must be");
    if (opr->is_virtual()) {
      _instruction_for_operand.at_put_grow(opr->vreg_number(), x, NULL);
    }
  }
  void  set_no_result(Value x)                     { assert(!x->has_uses(), "can't have use"); x->clear_operand(); }
  friend class LIRItem;
  LIR_Opr round_item(LIR_Opr opr);
  LIR_Opr force_to_spill(LIR_Opr value, BasicType t);
  PhiResolverState& resolver_state() { return _resolver_state; }
  void  move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
  void  move_to_phi(ValueStack* cur_state);
  void do_ArithmeticOp_Long   (ArithmeticOp*    x);
  void do_ArithmeticOp_Int    (ArithmeticOp*    x);
  void do_ArithmeticOp_FPU    (ArithmeticOp*    x);
  LIR_Opr getThreadPointer();
  void do_RegisterFinalizer(Intrinsic* x);
  void do_isInstance(Intrinsic* x);
  void do_getClass(Intrinsic* x);
  void do_currentThread(Intrinsic* x);
  void do_MathIntrinsic(Intrinsic* x);
  void do_ArrayCopy(Intrinsic* x);
  void do_CompareAndSwap(Intrinsic* x, ValueType* type);
  void do_NIOCheckIndex(Intrinsic* x);
  void do_FPIntrinsics(Intrinsic* x);
  void do_Reference_get(Intrinsic* x);
  void do_update_CRC32(Intrinsic* x);
  void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store);
  LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
  LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
  LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
  LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
  void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
  void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
  void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                         bool do_load, bool patch, CodeEmitInfo* info);
  void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
  void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
  void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
#endif
  static LIR_Opr result_register_for(ValueType* type, bool callee = false);
  ciObject* get_jobject_constant(Value value);
  LIRItemList* invoke_visit_arguments(Invoke* x);
  void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
  void trace_block_entry(BlockBegin* block);
  void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
  void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
  void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
  void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
  void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
  void increment_counter(address counter, BasicType type, int step = 1);
  void increment_counter(LIR_Address* addr, int step = 1);
  void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
  bool strength_reduce_multiply(LIR_Opr left, jint constant, LIR_Opr result, LIR_Opr tmp);
  void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes);
  void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false);
  void array_range_check          (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info);
  void nio_range_check            (LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info);
  void arithmetic_op_int  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp);
  void arithmetic_op_long (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL);
  void arithmetic_op_fpu  (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp = LIR_OprFact::illegalOpr);
  void shift_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr value, LIR_Opr count, LIR_Opr tmp);
  void logic_op   (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right);
  void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info);
  void monitor_exit  (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no);
  void new_instance    (LIR_Opr  dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr  scratch1, LIR_Opr  scratch2, LIR_Opr  scratch3,  LIR_Opr scratch4, LIR_Opr  klass_reg, CodeEmitInfo* info);
  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info);
  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info);
  void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type);
  LIR_Address* generate_address(LIR_Opr base,
                                LIR_Opr index, int shift,
                                int disp,
                                BasicType type);
  LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) {
    return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
  }
  LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
  void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
  bool can_inline_as_constant(Value i) const;
  bool can_inline_as_constant(LIR_Const* c) const;
  bool can_store_as_constant(Value i, BasicType type) const;
  LIR_Opr safepoint_poll_register();
  void profile_branch(If* if_instr, If::Condition cond);
  void increment_event_counter_impl(CodeEmitInfo* info,
                                    ciMethod *method, int frequency,
                                    int bci, bool backedge, bool notify);
  void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
  void increment_invocation_counter(CodeEmitInfo *info) {
    if (compilation()->count_invocations()) {
      increment_event_counter(info, InvocationEntryBci, false);
    }
  }
  void increment_backedge_counter(CodeEmitInfo* info, int bci) {
    if (compilation()->count_backedges()) {
      increment_event_counter(info, bci, true);
    }
  }
  CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
  CodeEmitInfo* state_for(Instruction* x);
  LIR_Opr operand_for_instruction(Instruction *x);
  void set_block(BlockBegin* block)              { _block = block; }
  void block_prolog(BlockBegin* block);
  void block_epilog(BlockBegin* block);
  void do_root (Instruction* instr);
  void walk    (Instruction* instr);
  void bind_block_entry(BlockBegin* block);
  void start_block(BlockBegin* block);
  LIR_Opr new_register(BasicType type);
  LIR_Opr new_register(Value value)              { return new_register(as_BasicType(value->type())); }
  LIR_Opr new_register(ValueType* type)          { return new_register(as_BasicType(type)); }
  LIR_Opr new_pointer_register() {
#ifdef _LP64
    return new_register(T_LONG);
#else
    return new_register(T_INT);
#endif
  }
  static LIR_Condition lir_cond(If::Condition cond) {
    LIR_Condition l = lir_cond_unknown;
    switch (cond) {
    case If::eql: l = lir_cond_equal;        break;
    case If::neq: l = lir_cond_notEqual;     break;
    case If::lss: l = lir_cond_less;         break;
    case If::leq: l = lir_cond_lessEqual;    break;
    case If::geq: l = lir_cond_greaterEqual; break;
    case If::gtr: l = lir_cond_greater;      break;
    case If::aeq: l = lir_cond_aboveEqual;   break;
    case If::beq: l = lir_cond_belowEqual;   break;
    default: fatal("You must pass valid If::Condition");
    };
    return l;
  }
#ifdef __SOFTFP__
  void do_soft_float_compare(If *x);
#endif // __SOFTFP__
  void init();
  SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
  SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
  void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
  void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x);
#ifdef JFR_HAVE_INTRINSICS
  void do_ClassIDIntrinsic(Intrinsic* x);
  void do_getEventWriter(Intrinsic* x);
#endif
  ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
                        Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
                        ciKlass* callee_signature_k);
  void profile_arguments(ProfileCall* x);
  void profile_parameters(Base* x);
  void profile_parameters_at_call(ProfileCall* x);
  LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
 public:
  Compilation*  compilation() const              { return _compilation; }
  FrameMap*     frame_map() const                { return _compilation->frame_map(); }
  ciMethod*     method() const                   { return _method; }
  BlockBegin*   block() const                    { return _block; }
  IRScope*      scope() const                    { return block()->scope(); }
  int max_virtual_register_number() const        { return _virtual_register_number; }
  void block_do(BlockBegin* block);
  enum VregFlag {
      must_start_in_memory = 0  // needs to be assigned a memory location at beginning, but may then be loaded in a register
    , callee_saved     = 1    // must be in a callee saved register
    , byte_reg         = 2    // must be in a byte register
    , num_vreg_flags
  };
  LIRGenerator(Compilation* compilation, ciMethod* method)
    : _compilation(compilation)
    , _method(method)
    , _virtual_register_number(LIR_OprDesc::vreg_base)
    , _vreg_flags(NULL, 0, num_vreg_flags) {
    init();
  }
  Instruction* instruction_for_opr(LIR_Opr opr);
  Instruction* instruction_for_vreg(int reg_num);
  void set_vreg_flag   (int vreg_num, VregFlag f);
  bool is_vreg_flag_set(int vreg_num, VregFlag f);
  void set_vreg_flag   (LIR_Opr opr,  VregFlag f) { set_vreg_flag(opr->vreg_number(), f); }
  bool is_vreg_flag_set(LIR_Opr opr,  VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); }
  static LIR_Opr exceptionOopOpr();
  static LIR_Opr exceptionPcOpr();
  static LIR_Opr divInOpr();
  static LIR_Opr divOutOpr();
  static LIR_Opr remOutOpr();
  static LIR_Opr shiftCountOpr();
  LIR_Opr syncTempOpr();
  LIR_Opr atomicLockOpr();
  LIR_Opr getThreadTemp();
  virtual void do_Phi            (Phi*             x);
  virtual void do_Local          (Local*           x);
  virtual void do_Constant       (Constant*        x);
  virtual void do_LoadField      (LoadField*       x);
  virtual void do_StoreField     (StoreField*      x);
  virtual void do_ArrayLength    (ArrayLength*     x);
  virtual void do_LoadIndexed    (LoadIndexed*     x);
  virtual void do_StoreIndexed   (StoreIndexed*    x);
  virtual void do_NegateOp       (NegateOp*        x);
  virtual void do_ArithmeticOp   (ArithmeticOp*    x);
  virtual void do_ShiftOp        (ShiftOp*         x);
  virtual void do_LogicOp        (LogicOp*         x);
  virtual void do_CompareOp      (CompareOp*       x);
  virtual void do_IfOp           (IfOp*            x);
  virtual void do_Convert        (Convert*         x);
  virtual void do_NullCheck      (NullCheck*       x);
  virtual void do_TypeCast       (TypeCast*        x);
  virtual void do_Invoke         (Invoke*          x);
  virtual void do_NewInstance    (NewInstance*     x);
  virtual void do_NewTypeArray   (NewTypeArray*    x);
  virtual void do_NewObjectArray (NewObjectArray*  x);
  virtual void do_NewMultiArray  (NewMultiArray*   x);
  virtual void do_CheckCast      (CheckCast*       x);
  virtual void do_InstanceOf     (InstanceOf*      x);
  virtual void do_MonitorEnter   (MonitorEnter*    x);
  virtual void do_MonitorExit    (MonitorExit*     x);
  virtual void do_Intrinsic      (Intrinsic*       x);
  virtual void do_BlockBegin     (BlockBegin*      x);
  virtual void do_Goto           (Goto*            x);
  virtual void do_If             (If*              x);
  virtual void do_IfInstanceOf   (IfInstanceOf*    x);
  virtual void do_TableSwitch    (TableSwitch*     x);
  virtual void do_LookupSwitch   (LookupSwitch*    x);
  virtual void do_Return         (Return*          x);
  virtual void do_Throw          (Throw*           x);
  virtual void do_Base           (Base*            x);
  virtual void do_OsrEntry       (OsrEntry*        x);
  virtual void do_ExceptionObject(ExceptionObject* x);
  virtual void do_RoundFP        (RoundFP*         x);
  virtual void do_UnsafeGetRaw   (UnsafeGetRaw*    x);
  virtual void do_UnsafePutRaw   (UnsafePutRaw*    x);
  virtual void do_UnsafeGetObject(UnsafeGetObject* x);
  virtual void do_UnsafePutObject(UnsafePutObject* x);
  virtual void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x);
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  virtual void do_ProfileCall    (ProfileCall*     x);
  virtual void do_ProfileReturnType (ProfileReturnType* x);
  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
  virtual void do_RuntimeCall    (RuntimeCall*     x);
  virtual void do_MemBar         (MemBar*          x);
  virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
#ifdef ASSERT
  virtual void do_Assert         (Assert*          x);
#endif
#ifdef C1_LIRGENERATOR_MD_HPP
#include C1_LIRGENERATOR_MD_HPP
#endif
};
class LIRItem: public CompilationResourceObj {
 private:
  Value         _value;
  LIRGenerator* _gen;
  LIR_Opr       _result;
  bool          _destroys_register;
  LIR_Opr       _new_result;
  LIRGenerator* gen() const { return _gen; }
 public:
  LIRItem(Value value, LIRGenerator* gen) {
    _destroys_register = false;
    _gen = gen;
    set_instruction(value);
  }
  LIRItem(LIRGenerator* gen) {
    _destroys_register = false;
    _gen = gen;
    _result = LIR_OprFact::illegalOpr;
    set_instruction(NULL);
  }
  void set_instruction(Value value) {
    _value = value;
    _result = LIR_OprFact::illegalOpr;
    if (_value != NULL) {
      _gen->walk(_value);
      _result = _value->operand();
    }
    _new_result = LIR_OprFact::illegalOpr;
  }
  Value value() const          { return _value;          }
  ValueType* type() const      { return value()->type(); }
  LIR_Opr result()             {
    assert(!_destroys_register || (!_result->is_register() || _result->is_virtual()),
           "shouldn't use set_destroys_register with physical regsiters");
    if (_destroys_register && _result->is_register()) {
      if (_new_result->is_illegal()) {
        _new_result = _gen->new_register(type());
        gen()->lir()->move(_result, _new_result);
      }
      return _new_result;
    } else {
      return _result;
    }
  }
  void set_result(LIR_Opr opr);
  void load_item();
  void load_byte_item();
  void load_nonconstant();
  void load_for_store(BasicType store_type);
  void load_item_force(LIR_Opr reg);
  void dont_load_item() {
    // leave the value unmaterialized (e.g. a constant); no register is forced
  }
  void set_destroys_register() {
    _destroys_register = true;
  }
  bool is_constant() const { return value()->as_Constant() != NULL; }
  bool is_stack()          { return result()->is_stack(); }
  bool is_register()       { return result()->is_register(); }
  ciObject* get_jobject_constant() const;
  jint      get_jint_constant() const;
  jlong     get_jlong_constant() const;
  jfloat    get_jfloat_constant() const;
  jdouble   get_jdouble_constant() const;
  jint      get_address_constant() const;
};
#endif // SHARE_VM_C1_C1_LIRGENERATOR_HPP
C:\hotspot-69087d08d473\src\share\vm/c1/c1_MacroAssembler.hpp
#ifndef SHARE_VM_C1_C1_MACROASSEMBLER_HPP
#define SHARE_VM_C1_C1_MACROASSEMBLER_HPP
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
class CodeEmitInfo;
class C1_MacroAssembler: public MacroAssembler {
 public:
  C1_MacroAssembler(CodeBuffer* code) : MacroAssembler(code) { pd_init(); }
  void explicit_null_check(Register base);
  void inline_cache_check(Register receiver, Register iCache);
  void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
  void remove_frame(int frame_size_in_bytes);
  void unverified_entry(Register receiver, Register ic_klass);
  void verified_entry();
  void verify_stack_oop(int offset) PRODUCT_RETURN;
  void verify_not_null_oop(Register r)  PRODUCT_RETURN;
#ifdef TARGET_ARCH_x86
# include "c1_MacroAssembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_MacroAssembler_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_MacroAssembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_MacroAssembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_MacroAssembler_ppc.hpp"
#endif
};
class StubAssembler: public C1_MacroAssembler {
 private:
  const char* _name;
  bool        _must_gc_arguments;
  int         _frame_size;
  int         _num_rt_args;
  int         _stub_id;
 public:
  StubAssembler(CodeBuffer* code, const char * name, int stub_id);
  void set_info(const char* name, bool must_gc_arguments);
  void set_frame_size(int size);
  void set_num_rt_args(int args);
  const char* name() const                       { return _name; }
  bool  must_gc_arguments() const                { return _must_gc_arguments; }
  int frame_size() const                         { return _frame_size; }
  int num_rt_args() const                        { return _num_rt_args; }
  int stub_id() const                            { return _stub_id; }
  int call_RT(Register oop_result1, Register metadata_result, address entry, int args_size = 0);
  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1);
  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2);
  int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
};
#endif // SHARE_VM_C1_C1_MACROASSEMBLER_HPP
C:\hotspot-69087d08d473\src\share\vm/c1/c1_Optimizer.cpp
#include "precompiled.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Optimizer.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueSet.hpp"
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
#include "compiler/compileLog.hpp"
define_array(ValueSetArray, ValueSet*);
define_stack(ValueSetList, ValueSetArray);
Optimizer::Optimizer(IR* ir) {
  assert(ir->is_valid(), "IR must be valid");
  _ir = ir;
}
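// Conditional expression elimination (CEE) collapses a diamond of the form
//
//   if (x cond y) goto B1 else goto B2
//   B1: push tval; goto sux
//   B2: push fval; goto sux
//
// into a single IfOp feeding the merge block's phi, i.e. the shape produced
// by "v = (x cond y) ? tval : fval".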
class CE_Eliminator: public BlockClosure {
 private:
  IR* _hir;
  int _cee_count;                                // the number of CEs successfully eliminated
  int _ifop_count;                               // the number of IfOps successfully simplified
  bool _has_substitution;
 public:
  CE_Eliminator(IR* hir) : _hir(hir), _cee_count(0), _ifop_count(0), _has_substitution(false) {
    _hir->iterate_preorder(this);
    if (_has_substitution) {
      SubstitutionResolver sr(_hir);
    }
    CompileLog* log = _hir->compilation()->log();
    if (log != NULL)
      log->set_context("optimize name='cee'");
  }
  ~CE_Eliminator() {
    CompileLog* log = _hir->compilation()->log();
    if (log != NULL)
      log->clear_context(); // skip marker if nothing was printed
  }
  int cee_count() const                          { return _cee_count; }
  int ifop_count() const                         { return _ifop_count; }
  void adjust_exception_edges(BlockBegin* block, BlockBegin* sux) {
    int e = sux->number_of_exception_handlers();
    for (int i = 0; i < e; i++) {
      BlockBegin* xhandler = sux->exception_handler_at(i);
      block->add_exception_handler(xhandler);
      assert(xhandler->is_predecessor(sux), "missing predecessor");
      if (sux->number_of_preds() == 0) {
        xhandler->remove_predecessor(sux);
      }
      if (!xhandler->is_predecessor(block)) {
        xhandler->add_predecessor(block);
      }
    }
  }
  virtual void block_do(BlockBegin* block);
 private:
  Value make_ifop(Value x, Instruction::Condition cond, Value y, Value tval, Value fval);
};
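// Matches the diamond described above: each successor may contain at most a
// non-trapping Constant before its Goto, both Gotos must reach the same merge
// block, and the merge phi for the pushed value must be the only phi
// involved. Float kinds are rejected, presumably to avoid disturbing their
// comparison semantics (NaN).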
void CE_Eliminator::block_do(BlockBegin* block) {
  If* if_ = block->end()->as_If();
  if (if_ == NULL) return;
  ValueType* if_type = if_->x()->type();
  if (!if_type->is_int() && !if_type->is_object()) return;
  BlockBegin* t_block = if_->tsux();
  BlockBegin* f_block = if_->fsux();
  Instruction* t_cur = t_block->next();
  Instruction* f_cur = f_block->next();
  Value t_const = NULL;
  Value f_const = NULL;
  if (t_cur->as_Constant() != NULL && !t_cur->can_trap()) {
    t_const = t_cur;
    t_cur = t_cur->next();
  }
  if (f_cur->as_Constant() != NULL && !f_cur->can_trap()) {
    f_const = f_cur;
    f_cur = f_cur->next();
  }
  Goto* t_goto = t_cur->as_Goto();
  if (t_goto == NULL) return;
  Goto* f_goto = f_cur->as_Goto();
  if (f_goto == NULL) return;
  BlockBegin* sux = t_goto->default_sux();
  if (sux != f_goto->default_sux()) return;
  ValueStack* if_state = if_->state();
  ValueStack* sux_state = sux->state();
  if (if_state->scope()->level() > sux_state->scope()->level()) {
    while (sux_state->scope() != if_state->scope()) {
      if_state = if_state->caller_state();
      assert(if_state != NULL, "states do not match up");
    }
  } else if (if_state->scope()->level() < sux_state->scope()->level()) {
    while (sux_state->scope() != if_state->scope()) {
      sux_state = sux_state->caller_state();
      assert(sux_state != NULL, "states do not match up");
    }
  }
  if (sux_state->stack_size() <= if_state->stack_size()) return;
  Value sux_phi = sux_state->stack_at(if_state->stack_size());
  if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
  if (sux_phi->type()->size() != sux_state->stack_size() - if_state->stack_size()) return;
  Value t_value = t_goto->state()->stack_at(if_state->stack_size());
  Value f_value = f_goto->state()->stack_at(if_state->stack_size());
  assert(t_value->type()->base() == f_value->type()->base(), "incompatible types");
  if (t_value->type()->is_float_kind()) return;
  for_each_phi_fun(sux, phi,
                   if (phi != sux_phi) return;
                   );
  for_each_phi_fun(t_block, phi, return; );
  for_each_phi_fun(f_block, phi, return; );
  bool is_safepoint = if_->is_safepoint();
  if (!is_safepoint && (t_goto->is_safepoint() || f_goto->is_safepoint())) {
    return;
  }
  Instruction* cur_end = if_->prev();
  assert((t_value != f_const && f_value != t_const) || t_const == f_const, "mismatch");
  if (t_value == t_const) {
    t_value = new Constant(t_const->type());
    NOT_PRODUCT(t_value->set_printable_bci(if_->printable_bci()));
    cur_end = cur_end->set_next(t_value);
  }
  if (f_value == f_const) {
    f_value = new Constant(f_const->type());
    NOT_PRODUCT(f_value->set_printable_bci(if_->printable_bci()));
    cur_end = cur_end->set_next(f_value);
  }
  Value result = make_ifop(if_->x(), if_->cond(), if_->y(), t_value, f_value);
  assert(result != NULL, "make_ifop must return a non-null instruction");
  if (!result->is_linked() && result->can_be_linked()) {
    NOT_PRODUCT(result->set_printable_bci(if_->printable_bci()));
    cur_end = cur_end->set_next(result);
  }
  ValueStack* state_before = if_->state_before();
  Goto* goto_ = new Goto(sux, state_before, is_safepoint);
  ValueStack* goto_state = if_state;
  goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
  goto_state->push(result->type(), result);
  assert(goto_state->is_same(sux_state), "states must match now");
  goto_->set_state(goto_state);
  cur_end = cur_end->set_next(goto_, goto_state->bci());
  BlockBegin::disconnect_edge(block, t_block);
  BlockBegin::disconnect_edge(block, f_block);
  if (t_block->number_of_preds() == 0) {
    BlockBegin::disconnect_edge(t_block, sux);
  }
  adjust_exception_edges(block, t_block);
  if (f_block->number_of_preds() == 0) {
    BlockBegin::disconnect_edge(f_block, sux);
  }
  adjust_exception_edges(block, f_block);
  block->set_end(goto_);
  if (sux_phi->as_Phi()->operand_count() == 1) {
    assert(sux_phi->as_Phi()->operand_at(0) == result, "screwed up phi");
    sux_phi->set_subst(result);
    _has_substitution = true;
  }
  _cee_count++;
  if (PrintCEE) {
    tty->print_cr("%d. CEE in B%d (B%d B%d)", cee_count(), block->block_id(), t_block->block_id(), f_block->block_id());
    tty->print_cr("%d. IfOp in B%d", ifop_count(), block->block_id());
  }
  _hir->verify();
}
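// Builds the replacement IfOp and, with OptimizeIfOps, folds chained
// conditionals: if x is itself an IfOp with constant operands, or both x and
// y are constants, the comparison against y is decided at compile time.
// Example: "(a ? 0 : 1) == 0" reduces to an IfOp selecting directly on a's
// condition, with no intermediate 0/1 value.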
Value CE_Eliminator::make_ifop(Value x, Instruction::Condition cond, Value y, Value tval, Value fval) {
  if (!OptimizeIfOps) {
    return new IfOp(x, cond, y, tval, fval);
  }
  tval = tval->subst();
  fval = fval->subst();
  if (tval == fval) {
    _ifop_count++;
    return tval;
  }
  x = x->subst();
  y = y->subst();
  Constant* y_const = y->as_Constant();
  if (y_const != NULL) {
    IfOp* x_ifop = x->as_IfOp();
    if (x_ifop != NULL) {                 // x is an ifop, y is a constant
      Constant* x_tval_const = x_ifop->tval()->subst()->as_Constant();
      Constant* x_fval_const = x_ifop->fval()->subst()->as_Constant();
      if (x_tval_const != NULL && x_fval_const != NULL) {
        Instruction::Condition x_ifop_cond = x_ifop->cond();
        Constant::CompareResult t_compare_res = x_tval_const->compare(cond, y_const);
        Constant::CompareResult f_compare_res = x_fval_const->compare(cond, y_const);
        if (t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable) {
          Value new_tval = t_compare_res == Constant::cond_true ? tval : fval;
          Value new_fval = f_compare_res == Constant::cond_true ? tval : fval;
          _ifop_count++;
          if (new_tval == new_fval) {
            return new_tval;
          } else {
            return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval);
          }
        }
      }
    } else {
      Constant* x_const = x->as_Constant();
      if (x_const != NULL) {         // x and y are constants
        Constant::CompareResult x_compare_res = x_const->compare(cond, y_const);
        if (x_compare_res != Constant::not_comparable) {
          _ifop_count++;
          return x_compare_res == Constant::cond_true ? tval : fval;
        }
      }
    }
  }
  return new IfOp(x, cond, y, tval, fval);
}
void Optimizer::eliminate_conditional_expressions() {
  CE_Eliminator ce(ir());
}
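// Block merging: a block ending in a non-safepoint Goto whose successor has
// exactly one predecessor (and is not the entry block) is fused with that
// successor. If the merged block then ends in an If comparing an IfOp with
// constant operands against a constant, the pair is rewritten into a single
// If on the IfOp's original condition.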
class BlockMerger: public BlockClosure {
 private:
  IR* _hir;
  int _merge_count;              // the number of block pairs successfully merged
 public:
  BlockMerger(IR* hir)
  : _hir(hir)
  , _merge_count(0)
  {
    _hir->iterate_preorder(this);
    CompileLog* log = _hir->compilation()->log();
    if (log != NULL)
      log->set_context("optimize name='eliminate_blocks'");
  }
  ~BlockMerger() {
    CompileLog* log = _hir->compilation()->log();
    if (log != NULL)
      log->clear_context(); // skip marker if nothing was printed
  }
  bool try_merge(BlockBegin* block) {
    BlockEnd* end = block->end();
    if (end->as_Goto() != NULL) {
      assert(end->number_of_sux() == 1, "end must have exactly one successor");
      BlockBegin* sux = end->default_sux();
      if (sux->number_of_preds() == 1 && !sux->is_entry_block() && !end->is_safepoint()) {
#ifdef ASSERT
        ValueStack* sux_state = sux->state();
        ValueStack* end_state = end->state();
        assert(end_state->scope() == sux_state->scope(), "scopes must match");
        assert(end_state->stack_size() == sux_state->stack_size(), "stack not equal");
        assert(end_state->locals_size() == sux_state->locals_size(), "locals not equal");
        int index;
        Value sux_value;
        for_each_stack_value(sux_state, index, sux_value) {
          assert(sux_value == end_state->stack_at(index), "stack not equal");
        }
        for_each_local_value(sux_state, index, sux_value) {
          assert(sux_value == end_state->local_at(index), "locals not equal");
        }
        assert(sux_state->caller_state() == end_state->caller_state(), "caller not equal");
#endif
        Instruction* prev = end->prev();
        Instruction* next = sux->next();
        assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
        prev->set_next(next);
        prev->fixup_block_pointers();
        sux->disconnect_from_graph();
        block->set_end(sux->end());
        for (int k = 0; k < sux->number_of_exception_handlers(); k++) {
          BlockBegin* xhandler = sux->exception_handler_at(k);
          block->add_exception_handler(xhandler);
          assert(xhandler->is_predecessor(sux), "missing predecessor");
          xhandler->remove_predecessor(sux);
          if (!xhandler->is_predecessor(block)) {
            xhandler->add_predecessor(block);
          }
        }
        _merge_count++;
        if (PrintBlockElimination) {
          tty->print_cr("%d. merged B%d & B%d (stack size = %d)",
                        _merge_count, block->block_id(), sux->block_id(), sux->state()->stack_size());
        }
        _hir->verify();
        If* if_ = block->end()->as_If();
        if (if_) {
          IfOp* ifop    = if_->x()->as_IfOp();
          Constant* con = if_->y()->as_Constant();
          bool swapped = false;
          if (!con || !ifop) {
            ifop = if_->y()->as_IfOp();
            con  = if_->x()->as_Constant();
            swapped = true;
          }
          if (con && ifop) {
            Constant* tval = ifop->tval()->as_Constant();
            Constant* fval = ifop->fval()->as_Constant();
            if (tval && fval) {
              Value prev = ifop;
              while (prev != NULL && prev->next() != if_) {
                prev = prev->next();
              }
              if (prev != NULL) {
                Instruction::Condition cond = if_->cond();
                BlockBegin* tsux = if_->tsux();
                BlockBegin* fsux = if_->fsux();
                if (swapped) {
                  cond = Instruction::mirror(cond);
                }
                BlockBegin* tblock = tval->compare(cond, con, tsux, fsux);
                BlockBegin* fblock = fval->compare(cond, con, tsux, fsux);
                if (tblock != fblock && !if_->is_safepoint()) {
                  If* newif = new If(ifop->x(), ifop->cond(), false, ifop->y(),
                                     tblock, fblock, if_->state_before(), if_->is_safepoint());
                  newif->set_state(if_->state()->copy());
                  assert(prev->next() == if_, "must be guaranteed by above search");
                  NOT_PRODUCT(newif->set_printable_bci(if_->printable_bci()));
                  prev->set_next(newif);
                  block->set_end(newif);
                  _merge_count++;
                  if (PrintBlockElimination) {
                    tty->print_cr("%d. replaced If and IfOp at end of B%d with single If", _merge_count, block->block_id());
                  }
                  _hir->verify();
                }
              }
            }
          }
        }
        return true;
      }
    }
    return false;
  }
  virtual void block_do(BlockBegin* block) {
    _hir->verify();
    while (try_merge(block)) {
      _hir->verify();
    }
  }
};
void Optimizer::eliminate_blocks() {
  BlockMerger bm(ir());
}
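// Null check elimination is a forward dataflow analysis over the set of
// values proven non-null. Dereferencing instructions (field and array
// accesses, monitors, invokes) both use the information, dropping their
// implicit null check when the object is already in the set, and generate
// it, since a completed access proves its object non-null afterwards.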
class NullCheckEliminator;
class NullCheckVisitor: public InstructionVisitor {
private:
  NullCheckEliminator* _nce;
  NullCheckEliminator* nce() { return _nce; }
public:
  NullCheckVisitor() {}
  void set_eliminator(NullCheckEliminator* nce) { _nce = nce; }
  void do_Phi            (Phi*             x);
  void do_Local          (Local*           x);
  void do_Constant       (Constant*        x);
  void do_LoadField      (LoadField*       x);
  void do_StoreField     (StoreField*      x);
  void do_ArrayLength    (ArrayLength*     x);
  void do_LoadIndexed    (LoadIndexed*     x);
  void do_StoreIndexed   (StoreIndexed*    x);
  void do_NegateOp       (NegateOp*        x);
  void do_ArithmeticOp   (ArithmeticOp*    x);
  void do_ShiftOp        (ShiftOp*         x);
  void do_LogicOp        (LogicOp*         x);
  void do_CompareOp      (CompareOp*       x);
  void do_IfOp           (IfOp*            x);
  void do_Convert        (Convert*         x);
  void do_NullCheck      (NullCheck*       x);
  void do_TypeCast       (TypeCast*        x);
  void do_Invoke         (Invoke*          x);
  void do_NewInstance    (NewInstance*     x);
  void do_NewTypeArray   (NewTypeArray*    x);
  void do_NewObjectArray (NewObjectArray*  x);
  void do_NewMultiArray  (NewMultiArray*   x);
  void do_CheckCast      (CheckCast*       x);
  void do_InstanceOf     (InstanceOf*      x);
  void do_MonitorEnter   (MonitorEnter*    x);
  void do_MonitorExit    (MonitorExit*     x);
  void do_Intrinsic      (Intrinsic*       x);
  void do_BlockBegin     (BlockBegin*      x);
  void do_Goto           (Goto*            x);
  void do_If             (If*              x);
  void do_IfInstanceOf   (IfInstanceOf*    x);
  void do_TableSwitch    (TableSwitch*     x);
  void do_LookupSwitch   (LookupSwitch*    x);
  void do_Return         (Return*          x);
  void do_Throw          (Throw*           x);
  void do_Base           (Base*            x);
  void do_OsrEntry       (OsrEntry*        x);
  void do_ExceptionObject(ExceptionObject* x);
  void do_RoundFP        (RoundFP*         x);
  void do_UnsafeGetRaw   (UnsafeGetRaw*    x);
  void do_UnsafePutRaw   (UnsafePutRaw*    x);
  void do_UnsafeGetObject(UnsafeGetObject* x);
  void do_UnsafePutObject(UnsafePutObject* x);
  void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x);
  void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
  void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  void do_ProfileCall    (ProfileCall*     x);
  void do_ProfileReturnType (ProfileReturnType*  x);
  void do_ProfileInvoke  (ProfileInvoke*   x);
  void do_RuntimeCall    (RuntimeCall*     x);
  void do_MemBar         (MemBar*          x);
  void do_RangeCheckPredicate(RangeCheckPredicate* x);
#ifdef ASSERT
  void do_Assert         (Assert*          x);
#endif
};
class NullCheckEliminator: public ValueVisitor {
 private:
  Optimizer*        _opt;
  ValueSet*         _visitable_instructions;        // Visit each instruction only once per basic block
  BlockList*        _work_list;                   // Basic blocks to visit
  bool visitable(Value x) {
    assert(_visitable_instructions != NULL, "check");
    return _visitable_instructions->contains(x);
  }
  void mark_visited(Value x) {
    assert(_visitable_instructions != NULL, "check");
    _visitable_instructions->remove(x);
  }
  void mark_visitable(Value x) {
    assert(_visitable_instructions != NULL, "check");
    _visitable_instructions->put(x);
  }
  void clear_visitable_state() {
    assert(_visitable_instructions != NULL, "check");
    _visitable_instructions->clear();
  }
  ValueSet*         _set;                         // current state, propagated to subsequent BlockBegins
  ValueSetList      _block_states;                // BlockBegin null-check states for all processed blocks
  NullCheckVisitor  _visitor;
  NullCheck*        _last_explicit_null_check;
  bool set_contains(Value x)                      { assert(_set != NULL, "check"); return _set->contains(x); }
  void set_put     (Value x)                      { assert(_set != NULL, "check"); _set->put(x); }
  void set_remove  (Value x)                      { assert(_set != NULL, "check"); _set->remove(x); }
  BlockList* work_list()                          { return _work_list; }
  void iterate_all();
  void iterate_one(BlockBegin* block);
  ValueSet* state()                               { return _set; }
  void      set_state_from (ValueSet* state)      { _set->set_from(state); }
  ValueSet* state_for      (BlockBegin* block)    { return _block_states[block->block_id()]; }
  void      set_state_for  (BlockBegin* block, ValueSet* stack) { _block_states[block->block_id()] = stack; }
  bool      merge_state_for(BlockBegin* block,
                            ValueSet*   incoming_state);
 public:
  NullCheckEliminator(Optimizer* opt)
    : _opt(opt)
    , _work_list(new BlockList())
    , _set(new ValueSet())
    , _block_states(BlockBegin::number_of_blocks(), NULL)
    , _last_explicit_null_check(NULL) {
    _visitable_instructions = new ValueSet();
    _visitor.set_eliminator(this);
    CompileLog* log = _opt->ir()->compilation()->log();
    if (log != NULL)
      log->set_context("optimize name='null_check_elimination'");
  }
  ~NullCheckEliminator() {
    CompileLog* log = _opt->ir()->compilation()->log();
    if (log != NULL)
      log->clear_context(); // skip marker if nothing was printed
  }
  Optimizer*  opt()                               { return _opt; }
  IR*         ir ()                               { return opt()->ir(); }
  void iterate(BlockBegin* root);
  void visit(Value* f);
  void        set_last_explicit_null_check(NullCheck* check) { _last_explicit_null_check = check; }
  NullCheck*  last_explicit_null_check()                     { return _last_explicit_null_check; }
  Value       last_explicit_null_check_obj()                 { return (_last_explicit_null_check
                                                                         ? _last_explicit_null_check->obj()
                                                                         : NULL); }
  NullCheck*  consume_last_explicit_null_check() {
    _last_explicit_null_check->unpin(Instruction::PinExplicitNullCheck);
    _last_explicit_null_check->set_can_trap(false);
    return _last_explicit_null_check;
  }
  void        clear_last_explicit_null_check()               { _last_explicit_null_check = NULL; }
  void handle_AccessField     (AccessField* x);
  void handle_ArrayLength     (ArrayLength* x);
  void handle_LoadIndexed     (LoadIndexed* x);
  void handle_StoreIndexed    (StoreIndexed* x);
  void handle_NullCheck       (NullCheck* x);
  void handle_Invoke          (Invoke* x);
  void handle_NewInstance     (NewInstance* x);
  void handle_NewArray        (NewArray* x);
  void handle_AccessMonitor   (AccessMonitor* x);
  void handle_Intrinsic       (Intrinsic* x);
  void handle_ExceptionObject (ExceptionObject* x);
  void handle_Phi             (Phi* x);
  void handle_ProfileCall     (ProfileCall* x);
  void handle_ProfileReturnType (ProfileReturnType* x);
};
void NullCheckVisitor::do_Phi            (Phi*             x) { nce()->handle_Phi(x);      }
void NullCheckVisitor::do_Local          (Local*           x) {}
void NullCheckVisitor::do_Constant       (Constant*        x) { /* FIXME: handle object constants */ }
void NullCheckVisitor::do_LoadField      (LoadField*       x) { nce()->handle_AccessField(x); }
void NullCheckVisitor::do_StoreField     (StoreField*      x) { nce()->handle_AccessField(x); }
void NullCheckVisitor::do_ArrayLength    (ArrayLength*     x) { nce()->handle_ArrayLength(x); }
void NullCheckVisitor::do_LoadIndexed    (LoadIndexed*     x) { nce()->handle_LoadIndexed(x); }
void NullCheckVisitor::do_StoreIndexed   (StoreIndexed*    x) { nce()->handle_StoreIndexed(x); }
void NullCheckVisitor::do_NegateOp       (NegateOp*        x) {}
void NullCheckVisitor::do_ArithmeticOp   (ArithmeticOp*    x) { if (x->can_trap()) nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_ShiftOp        (ShiftOp*         x) {}
void NullCheckVisitor::do_LogicOp        (LogicOp*         x) {}
void NullCheckVisitor::do_CompareOp      (CompareOp*       x) {}
void NullCheckVisitor::do_IfOp           (IfOp*            x) {}
void NullCheckVisitor::do_Convert        (Convert*         x) {}
void NullCheckVisitor::do_NullCheck      (NullCheck*       x) { nce()->handle_NullCheck(x); }
void NullCheckVisitor::do_TypeCast       (TypeCast*        x) {}
void NullCheckVisitor::do_Invoke         (Invoke*          x) { nce()->handle_Invoke(x); }
void NullCheckVisitor::do_NewInstance    (NewInstance*     x) { nce()->handle_NewInstance(x); }
void NullCheckVisitor::do_NewTypeArray   (NewTypeArray*    x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_NewObjectArray (NewObjectArray*  x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_NewMultiArray  (NewMultiArray*   x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_CheckCast      (CheckCast*       x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_InstanceOf     (InstanceOf*      x) {}
void NullCheckVisitor::do_MonitorEnter   (MonitorEnter*    x) { nce()->handle_AccessMonitor(x); }
void NullCheckVisitor::do_MonitorExit    (MonitorExit*     x) { nce()->handle_AccessMonitor(x); }
void NullCheckVisitor::do_Intrinsic      (Intrinsic*       x) { nce()->handle_Intrinsic(x);     }
void NullCheckVisitor::do_BlockBegin     (BlockBegin*      x) {}
void NullCheckVisitor::do_Goto           (Goto*            x) {}
void NullCheckVisitor::do_If             (If*              x) {}
void NullCheckVisitor::do_IfInstanceOf   (IfInstanceOf*    x) {}
void NullCheckVisitor::do_TableSwitch    (TableSwitch*     x) {}
void NullCheckVisitor::do_LookupSwitch   (LookupSwitch*    x) {}
void NullCheckVisitor::do_Return         (Return*          x) {}
void NullCheckVisitor::do_Throw          (Throw*           x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_Base           (Base*            x) {}
void NullCheckVisitor::do_OsrEntry       (OsrEntry*        x) {}
void NullCheckVisitor::do_ExceptionObject(ExceptionObject* x) { nce()->handle_ExceptionObject(x); }
void NullCheckVisitor::do_RoundFP        (RoundFP*         x) {}
void NullCheckVisitor::do_UnsafeGetRaw   (UnsafeGetRaw*    x) {}
void NullCheckVisitor::do_UnsafePutRaw   (UnsafePutRaw*    x) {}
void NullCheckVisitor::do_UnsafeGetObject(UnsafeGetObject* x) {}
void NullCheckVisitor::do_UnsafePutObject(UnsafePutObject* x) {}
void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
                                                                nce()->handle_ProfileCall(x); }
void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
#ifdef ASSERT
void NullCheckVisitor::do_Assert         (Assert*          x) {}
#endif
void NullCheckEliminator::visit(Value* p) {
  assert(*p != NULL, "should not find NULL instructions");
  if (visitable(*p)) {
    mark_visited(*p);
    (*p)->visit(&_visitor);
  }
}
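// Merges the incoming non-null set into the state recorded for block. At a
// control-flow merge only values proven non-null along every incoming path
// survive, so the sets are intersected; returns true if the block's state
// changed and the block therefore needs to be revisited.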
bool NullCheckEliminator::merge_state_for(BlockBegin* block, ValueSet* incoming_state) {
  ValueSet* state = state_for(block);
  if (state == NULL) {
    state = incoming_state->copy();
    set_state_for(block, state);
    return true;
  } else {
    bool changed = state->set_intersect(incoming_state);
    if (PrintNullCheckElimination && changed) {
      tty->print_cr("Block %d's null check state changed", block->block_id());
    }
    return changed;
  }
}
void NullCheckEliminator::iterate_all() {
  while (work_list()->length() > 0) {
    iterate_one(work_list()->pop());
  }
}
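// Processes a single block: on first visit the state is seeded with the
// receiver of a non-static method, which is non-null on entry. The entry
// state is propagated to the block's exception handlers (an exception may
// occur before any instruction completes), then the pinned and trapping
// instructions are visited in order and the resulting state is merged into
// the successors.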
void NullCheckEliminator::iterate_one(BlockBegin* block) {
  clear_visitable_state();
  set_last_explicit_null_check(NULL);
  if (PrintNullCheckElimination) {
    tty->print_cr(" ...iterating block %d in null check elimination for %s::%s%s",
                  block->block_id(),
                  ir()->method()->holder()->name()->as_utf8(),
                  ir()->method()->name()->as_utf8(),
                  ir()->method()->signature()->as_symbol()->as_utf8());
  }
  if (state_for(block) == NULL) {
    ValueSet* tmp_state = new ValueSet();
    set_state_for(block, tmp_state);
    ValueStack* stack  = block->state();
    IRScope*    scope  = stack->scope();
    ciMethod*   method = scope->method();
    if (!method->is_static()) {
      Local* local0 = stack->local_at(0)->as_Local();
      assert(local0 != NULL, "must be");
      assert(local0->type() == objectType, "invalid type of receiver");
      if (local0 != NULL) {
        tmp_state->put(local0);
        if (PrintNullCheckElimination) {
          tty->print_cr("Local 0 (value %d) proven non-null upon entry", local0->id());
        }
      }
    }
  }
  set_state_from(state_for(block));
  for_each_phi_fun(block, phi,
                   mark_visitable(phi);
                   );
  BlockEnd* e = block->end();
  assert(e != NULL, "incomplete graph");
  int i;
  for (i = 0; i < block->number_of_exception_handlers(); i++) {
    BlockBegin* next = block->exception_handler_at(i);
    if (merge_state_for(next, state())) {
      if (!work_list()->contains(next)) {
        work_list()->push(next);
      }
    }
  }
  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    mark_visitable(instr);
    if (instr->is_pinned() || instr->can_trap() || (instr->as_NullCheck() != NULL)) {
      mark_visited(instr);
      instr->input_values_do(this);
      instr->visit(&_visitor);
    }
  }
  for (i = 0; i < e->number_of_sux(); i++) {
    BlockBegin* next = e->sux_at(i);
    if (merge_state_for(next, state())) {
      if (!work_list()->contains(next)) {
        work_list()->push(next);
      }
    }
  }
}
void NullCheckEliminator::iterate(BlockBegin* block) {
  work_list()->push(block);
  iterate_all();
}
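// Static field accesses have no receiver to check, but a load of a static
// final field whose constant value is a non-null oop proves the loaded value
// itself non-null. For instance fields the code below is the pattern shared
// by the array and monitor handlers: fold or drop the check if the object is
// known non-null, otherwise record the object as non-null from here on.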
void NullCheckEliminator::handle_AccessField(AccessField* x) {
  if (x->is_static()) {
    if (x->as_LoadField() != NULL) {
      ciField* field = x->field();
      if (field->is_constant()) {
        ciConstant field_val = field->constant_value();
        BasicType field_type = field_val.basic_type();
        if (field_type == T_OBJECT || field_type == T_ARRAY) {
          ciObject* obj_val = field_val.as_object();
          if (!obj_val->is_null_object()) {
            if (PrintNullCheckElimination) {
              tty->print_cr("AccessField %d proven non-null by static final non-null oop check",
                            x->id());
            }
            set_put(x);
          }
        }
      }
    }
    clear_last_explicit_null_check();
    return;
  }
  Value obj = x->obj();
  if (set_contains(obj)) {
    if (last_explicit_null_check_obj() == obj && !x->needs_patching()) {
      x->set_explicit_null_check(consume_last_explicit_null_check());
      x->set_needs_null_check(true);
      if (PrintNullCheckElimination) {
        tty->print_cr("Folded NullCheck %d into AccessField %d's null check for value %d",
                      x->explicit_null_check()->id(), x->id(), obj->id());
      }
    } else {
      x->set_explicit_null_check(NULL);
      x->set_needs_null_check(false);
      if (PrintNullCheckElimination) {
        tty->print_cr("Eliminated AccessField %d's null check for value %d", x->id(), obj->id());
      }
    }
  } else {
    set_put(obj);
    if (PrintNullCheckElimination) {
      tty->print_cr("AccessField %d of value %d proves value to be non-null", x->id(), obj->id());
    }
    x->set_needs_null_check(true);
    x->set_explicit_null_check(NULL);
  }
  clear_last_explicit_null_check();
}
void NullCheckEliminator::handle_ArrayLength(ArrayLength* x) {
  Value array = x->array();
  if (set_contains(array)) {
    if (last_explicit_null_check_obj() == array) {
      x->set_explicit_null_check(consume_last_explicit_null_check());
      x->set_needs_null_check(true);
      if (PrintNullCheckElimination) {
        tty->print_cr("Folded NullCheck %d into ArrayLength %d's null check for value %d",
                      x->explicit_null_check()->id(), x->id(), array->id());
      }
    } else {
      x->set_explicit_null_check(NULL);
      x->set_needs_null_check(false);
      if (PrintNullCheckElimination) {
        tty->print_cr("Eliminated ArrayLength %d's null check for value %d", x->id(), array->id());
      }
    }
  } else {
    set_put(array);
    if (PrintNullCheckElimination) {
      tty->print_cr("ArrayLength %d of value %d proves value to be non-null", x->id(), array->id());
    }
    x->set_needs_null_check(true);
    x->set_explicit_null_check(NULL);
  }
  clear_last_explicit_null_check();
}
void NullCheckEliminator::handle_LoadIndexed(LoadIndexed* x) {
  Value array = x->array();
  if (set_contains(array)) {
    if (last_explicit_null_check_obj() == array) {
      x->set_explicit_null_check(consume_last_explicit_null_check());
      x->set_needs_null_check(true);
      if (PrintNullCheckElimination) {
        tty->print_cr("Folded NullCheck %d into LoadIndexed %d's null check for value %d",
                      x->explicit_null_check()->id(), x->id(), array->id());
      }
    } else {
      x->set_explicit_null_check(NULL);
      x->set_needs_null_check(false);
      if (PrintNullCheckElimination) {
        tty->print_cr("Eliminated LoadIndexed %d's null check for value %d", x->id(), array->id());
      }
    }
  } else {
    set_put(array);
    if (PrintNullCheckElimination) {
      tty->print_cr("LoadIndexed %d of value %d proves value to be non-null", x->id(), array->id());
    }
    x->set_needs_null_check(true);
    x->set_explicit_null_check(NULL);
  }
  clear_last_explicit_null_check();
}
void NullCheckEliminator::handle_StoreIndexed(StoreIndexed* x) {
  Value array = x->array();
  if (set_contains(array)) {
    if (PrintNullCheckElimination) {
      tty->print_cr("Eliminated StoreIndexed %d's null check for value %d", x->id(), array->id());
    }
    x->set_needs_null_check(false);
  } else {
    set_put(array);
    if (PrintNullCheckElimination) {
      tty->print_cr("StoreIndexed %d of value %d proves value to be non-null", x->id(), array->id());
    }
    x->set_needs_null_check(true);
  }
  clear_last_explicit_null_check();
}
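// An explicit NullCheck is redundant if its object is already proven
// non-null. Otherwise it is pinned and remembered as the last explicit null
// check, so that a following access of the same object can consume it and
// fold it into its own implicit check.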
void NullCheckEliminator::handle_NullCheck(NullCheck* x) {
  Value obj = x->obj();
  if (set_contains(obj)) {
    if (PrintNullCheckElimination) {
      tty->print_cr("Eliminated NullCheck %d for value %d", x->id(), obj->id());
    }
    x->set_can_trap(false);
  } else {
    x->set_can_trap(true);
    x->pin(Instruction::PinExplicitNullCheck);
    set_put(obj);
    set_last_explicit_null_check(x);
    if (PrintNullCheckElimination) {
      tty->print_cr("NullCheck %d of value %d proves value to be non-null", x->id(), obj->id());
    }
  }
}
void NullCheckEliminator::handle_Invoke(Invoke* x) {
  if (!x->has_receiver()) {
    clear_last_explicit_null_check();
    return;
  }
  Value recv = x->receiver();
  if (!set_contains(recv)) {
    set_put(recv);
    if (PrintNullCheckElimination) {
      tty->print_cr("Invoke %d of value %d proves value to be non-null", x->id(), recv->id());
    }
  }
  clear_last_explicit_null_check();
}
void NullCheckEliminator::handle_NewInstance(NewInstance* x) {
  set_put(x);
  if (PrintNullCheckElimination) {
    tty->print_cr("NewInstance %d is non-null", x->id());
  }
}
void NullCheckEliminator::handle_NewArray(NewArray* x) {
  set_put(x);
  if (PrintNullCheckElimination) {
    tty->print_cr("NewArray %d is non-null", x->id());
  }
}
void NullCheckEliminator::handle_ExceptionObject(ExceptionObject* x) {
  set_put(x);
  if (PrintNullCheckElimination) {
    tty->print_cr("ExceptionObject %d is non-null", x->id());
  }
}
void NullCheckEliminator::handle_AccessMonitor(AccessMonitor* x) {
  Value obj = x->obj();
  if (set_contains(obj)) {
    if (PrintNullCheckElimination) {
      tty->print_cr("Eliminated AccessMonitor %d's null check for value %d", x->id(), obj->id());
    }
    x->set_needs_null_check(false);
  } else {
    set_put(obj);
    if (PrintNullCheckElimination) {
      tty->print_cr("AccessMonitor %d of value %d proves value to be non-null", x->id(), obj->id());
    }
    x->set_needs_null_check(true);
  }
  clear_last_explicit_null_check();
}
void NullCheckEliminator::handle_Intrinsic(Intrinsic* x) {
  if (!x->has_receiver()) {
    if (x->id() == vmIntrinsics::_arraycopy) {
      for (int i = 0; i < x->number_of_arguments(); i++) {
        x->set_arg_needs_null_check(i, !set_contains(x->argument_at(i)));
      }
    }
    clear_last_explicit_null_check();
    return;
  }
  Value recv = x->receiver();
  if (set_contains(recv)) {
    if (PrintNullCheckElimination) {
      tty->print_cr("Eliminated Intrinsic %d's null check for value %d", x->id(), recv->id());
    }
    x->set_needs_null_check(false);
  } else {
    set_put(recv);
    if (PrintNullCheckElimination) {
      tty->print_cr("Intrinsic %d of value %d proves value to be non-null", x->id(), recv->id());
    }
    x->set_needs_null_check(true);
  }
  clear_last_explicit_null_check();
}
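// A phi is non-null only if every operand is proven non-null; otherwise any
// stale fact about the phi itself is removed from the current state.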
void NullCheckEliminator::handle_Phi(Phi* x) {
  int i;
  bool all_non_null = true;
  if (x->is_illegal()) {
    all_non_null = false;
  } else {
    for (i = 0; i < x->operand_count(); i++) {
      Value input = x->operand_at(i);
      if (!set_contains(input)) {
        all_non_null = false;
      }
    }
  }
  if (all_non_null) {
    if (PrintNullCheckElimination) {
      tty->print_cr("Eliminated Phi %d's null check for phifun because all inputs are non-null", x->id());
    }
    x->set_needs_null_check(false);
  } else if (set_contains(x)) {
    set_remove(x);
  }
}
void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
  for (int i = 0; i < x->nb_profiled_args(); i++) {
    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
  }
}
void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
  x->set_needs_null_check(!set_contains(x->ret()));
}
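// Drives the elimination: iterate from the start block to a fixed point,
// then walk the CFG once more so that exception handlers reachable only
// through exceptional edges are seeded and processed as well.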
void Optimizer::eliminate_null_checks() {
  ResourceMark rm;
  NullCheckEliminator nce(this);
  if (PrintNullCheckElimination) {
    tty->print_cr("Starting null check elimination for method %s::%s%s",
                  ir()->method()->holder()->name()->as_utf8(),
                  ir()->method()->name()->as_utf8(),
                  ir()->method()->signature()->as_symbol()->as_utf8());
  }
  nce.iterate(ir()->start());
  int nblocks = BlockBegin::number_of_blocks();
  BlockList blocks(nblocks);
  boolArray visited_block(nblocks, false);
  blocks.push(ir()->start());
  visited_block[ir()->start()->block_id()] = true;
  for (int i = 0; i < blocks.length(); i++) {
    BlockBegin* b = blocks[i];
    for (int e = b->number_of_exception_handlers(); e-- > 0; ) {
      BlockBegin* excp = b->exception_handler_at(e);
      int id = excp->block_id();
      if (!visited_block[id]) {
        blocks.push(excp);
        visited_block[id] = true;
        nce.iterate(excp);
      }
    }
    BlockEnd *end = b->end();
    for (int s = end->number_of_sux(); s-- > 0; ) {
      BlockBegin* next = end->sux_at(s);
      int id = next->block_id();
      if (!visited_block[id]) {
        blocks.push(next);
        visited_block[id] = true;
      }
    }
  }
  if (PrintNullCheckElimination) {
    tty->print_cr("Done with null check elimination for method %s::%s%s",
                  ir()->method()->holder()->name()->as_utf8(),
                  ir()->method()->name()->as_utf8(),
                  ir()->method()->signature()->as_symbol()->as_utf8());
  }
}
C:\hotspot-69087d08d473\src\share\vm/c1/c1_Optimizer.hpp
#ifndef SHARE_VM_C1_C1_OPTIMIZER_HPP
#define SHARE_VM_C1_C1_OPTIMIZER_HPP
#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "memory/allocation.hpp"
class Optimizer VALUE_OBJ_CLASS_SPEC {
 private:
  IR* _ir;
 public:
  Optimizer(IR* ir);
  IR* ir() const                                 { return _ir; }
  void eliminate_conditional_expressions();
  void eliminate_blocks();
  void eliminate_null_checks();
};
#endif // SHARE_VM_C1_C1_OPTIMIZER_HPP
C:\hotspot-69087d08d473\src\share\vm/c1/c1_RangeCheckElimination.cpp
#include "precompiled.hpp"
#include "c1/c1_ValueStack.hpp"
#include "c1/c1_RangeCheckElimination.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_ValueMap.hpp"
#include "ci/ciMethodData.hpp"
#include "runtime/deoptimization.hpp"
#ifdef ASSERT
#define TRACE_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination) { code; }
#define ASSERT_RANGE_CHECK_ELIMINATION(code) if (AssertRangeCheckElimination) { code; }
#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination || AssertRangeCheckElimination) { code; }
#else
#define TRACE_RANGE_CHECK_ELIMINATION(code)
#define ASSERT_RANGE_CHECK_ELIMINATION(code)
#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code)
#endif
void RangeCheckElimination::eliminate(IR *ir) {
  bool do_elimination = ir->compilation()->has_access_indexed();
  ASSERT_RANGE_CHECK_ELIMINATION(do_elimination = true);
  if (do_elimination) {
    RangeCheckEliminator rce(ir);
  }
}
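// The eliminator computes symbolic integer bounds (a constant plus an
// optional instruction) for values along the dominator tree and uses them to
// prove array indices in range. In optimistic mode, proven-redundant checks
// are replaced by hoisted RangeCheckPredicates that deoptimize if violated
// at runtime.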
RangeCheckEliminator::RangeCheckEliminator(IR *ir) :
  _bounds(Instruction::number_of_instructions(), NULL),
  _access_indexed_info(Instruction::number_of_instructions(), NULL)
{
  _visitor.set_range_check_eliminator(this);
  _ir = ir;
  _number_of_instructions = Instruction::number_of_instructions();
  _optimistic = ir->compilation()->is_optimistic();
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->cr();
    tty->print_cr("Range check elimination");
    ir->method()->print_name(tty);
    tty->cr();
  );
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->print_cr("optimistic=%d", (int)_optimistic);
  );
#ifdef ASSERT
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->print_cr("Verification of IR . . .");
  );
  Verification verification(ir);
#endif
  set_process_block_flags(ir->start());
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->print_cr("Starting pass over dominator tree . . .")
  );
  calc_bounds(ir->start(), NULL);
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->print_cr("Finished!")
  );
}
void RangeCheckEliminator::Visitor::do_Constant(Constant *c) {
  IntConstant *ic = c->type()->as_IntConstant();
  if (ic != NULL) {
    int value = ic->value();
    _bound = new Bound(value, NULL, value, NULL);
  }
}
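// x & c with a non-negative constant c always yields a value in [0, c].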
void RangeCheckEliminator::Visitor::do_LogicOp(LogicOp *lo) {
  if (lo->type()->as_IntType() && lo->op() == Bytecodes::_iand && (lo->x()->as_Constant() || lo->y()->as_Constant())) {
    int constant = 0;
    Constant *c = lo->x()->as_Constant();
    if (c != NULL) {
      constant = c->type()->as_IntConstant()->value();
    } else {
      constant = lo->y()->as_Constant()->type()->as_IntConstant()->value();
    }
    if (constant >= 0) {
      _bound = new Bound(0, NULL, constant, NULL);
    }
  }
}
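// Bounds a phi by or-ing the bounds of its operands. A self-referential
// operand of the form phi + c (a loop increment) contributes no bound but
// decides which side survives: c == 1 drops the upper bound, c > 1 drops
// both, and c < 0 drops the lower bound.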
void RangeCheckEliminator::Visitor::do_Phi(Phi *phi) {
  if (!phi->type()->as_IntType() && !phi->type()->as_ObjectType()) return;
  BlockBegin *block = phi->block();
  int op_count = phi->operand_count();
  bool has_upper = true;
  bool has_lower = true;
  assert(phi, "Phi must not be null");
  Bound *bound = NULL;
  for (int i=0; i<op_count; i++) {
    Value v = phi->operand_at(i);
    if (v == phi) continue;
    Op2 *op2 = v->as_Op2();
    if (op2 != NULL) {
      Value x = op2->x();
      Value y = op2->y();
      if (x == phi || y == phi) {
        Value other = x;
        if (other == phi) {
          other = y;
        }
        ArithmeticOp *ao = v->as_ArithmeticOp();
        if (ao != NULL && ao->op() == Bytecodes::_iadd) {
          assert(ao->op() == Bytecodes::_iadd, "Has to be add!");
          if (ao->type()->as_IntType()) {
            Constant *c = other->as_Constant();
            if (c != NULL) {
              assert(c->type()->as_IntConstant(), "Constant has to be of type integer");
              int value = c->type()->as_IntConstant()->value();
              if (value == 1) {
                has_upper = false;
              } else if (value > 1) {
                has_upper = false;
                has_lower = false;
              } else if (value < 0) {
                has_lower = false;
              }
              continue;
            }
          }
        }
      }
    }
    Bound *v_bound = _rce->get_bound(v);
    Bound *cur_bound;
    int cur_constant = 0;
    Value cur_value = v;
    if (v->type()->as_IntConstant()) {
      cur_constant = v->type()->as_IntConstant()->value();
      cur_value = NULL;
    }
    if (!v_bound->has_upper() || !v_bound->has_lower()) {
      cur_bound = new Bound(cur_constant, cur_value, cur_constant, cur_value);
    } else {
      cur_bound = v_bound;
    }
    if (cur_bound) {
      if (!bound) {
        bound = cur_bound->copy();
      } else {
        bound->or_op(cur_bound);
      }
    } else {
      bound = NULL;
      break;
    }
  }
  if (bound) {
    if (!has_upper) {
      bound->remove_upper();
    }
    if (!has_lower) {
      bound->remove_lower();
    }
    _bound = bound;
  } else {
    _bound = new Bound();
  }
}
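// Propagates bounds through arithmetic:
//  - x % y is in [0, y - 1] if x is known non-negative and y is an array
//    length
//  - x + c and x - c shift both bounds by c, discarded on 32-bit overflow
//  - x - y, where y is the symbolic part of x's lower bound, is at least
//    x's constant lower part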
void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) {
  Value x = ao->x();
  Value y = ao->y();
  if (ao->op() == Bytecodes::_irem) {
    Bound* x_bound = _rce->get_bound(x);
    Bound* y_bound = _rce->get_bound(y);
    if (x_bound->lower() >= 0 && x_bound->lower_instr() == NULL && y->as_ArrayLength() != NULL) {
      _bound = new Bound(0, NULL, -1, y);
    } else {
      _bound = new Bound();
    }
  } else if (!x->as_Constant() || !y->as_Constant()) {
    assert(!x->as_Constant() || !y->as_Constant(), "One of the operands must be non-constant!");
    if (((x->as_Constant() || y->as_Constant()) && (ao->op() == Bytecodes::_iadd)) || (y->as_Constant() && ao->op() == Bytecodes::_isub)) {
      assert(ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub, "Operand must be iadd or isub");
      if (y->as_Constant()) {
        Value tmp = x;
        x = y;
        y = tmp;
      }
      assert(x->as_Constant()->type()->as_IntConstant(), "Constant must be int constant!");
      int const_value = x->as_Constant()->type()->as_IntConstant()->value();
      if (ao->op() == Bytecodes::_iadd || const_value != min_jint) {
        if (ao->op() == Bytecodes::_isub) {
          const_value = -const_value;
        }
        Bound * bound = _rce->get_bound(y);
        if (bound->has_upper() && bound->has_lower()) {
          int new_lower = bound->lower() + const_value;
          jlong new_lowerl = ((jlong)bound->lower()) + const_value;
          int new_upper = bound->upper() + const_value;
          jlong new_upperl = ((jlong)bound->upper()) + const_value;
          if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper) == new_upperl) {
            Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr());
            _bound = newBound;
          } else {
            _bound = new Bound();
          }
        } else {
          _bound = new Bound();
        }
      } else {
        _bound = new Bound();
      }
    } else {
      Bound *bound = _rce->get_bound(x);
      if (ao->op() == Bytecodes::_isub) {
        if (bound->lower_instr() == y) {
          _bound = new Bound(Instruction::geq, NULL, bound->lower());
        } else {
          _bound = new Bound();
        }
      } else {
        _bound = new Bound();
      }
    }
  }
}
void RangeCheckEliminator::Visitor::do_IfOp(IfOp *ifOp)
{
  if (ifOp->tval()->type()->as_IntConstant() && ifOp->fval()->type()->as_IntConstant()) {
    int min = ifOp->tval()->type()->as_IntConstant()->value();
    int max = ifOp->fval()->type()->as_IntConstant()->value();
    if (min > max) {
      int tmp = min;
      min = max;
      max = tmp;
    }
    _bound = new Bound(min, NULL, max, NULL);
  }
}
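// Returns the bound on top of v's bound stack, computing an initial bound via
// the visitor on first use; values that are neither int nor object have none.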
RangeCheckEliminator::Bound *RangeCheckEliminator::get_bound(Value v) {
  if (!v || (!v->type()->as_IntType() && !v->type()->as_ObjectType())) return NULL;
  if (!_bounds[v->id()]) {
    _bounds[v->id()] = new BoundStack();
    _visitor.clear_bound();
    Value visit_value = v;
    visit_value->visit(&_visitor);
    Bound *bound = _visitor.bound();
    if (bound) {
      _bounds[v->id()]->push(bound);
    }
    if (_bounds[v->id()]->length() == 0) {
      assert(!(v->as_Constant() && v->type()->as_IntConstant()), "constants not handled here");
      _bounds[v->id()]->push(new Bound());
    }
  } else if (_bounds[v->id()]->length() == 0) {
    return new Bound();
  }
  return _bounds[v->id()]->top();
}
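// Normalizes strict comparisons to their inclusive forms before pushing a new
// bound: x > c becomes x >= c + 1 and x < c becomes x <= c - 1.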
void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant) {
  if (cond == Instruction::gtr) {
    cond = Instruction::geq;
    constant++;
  } else if (cond == Instruction::lss) {
    cond = Instruction::leq;
    constant--;
  }
  Bound *bound = new Bound(cond, value, constant);
  update_bound(pushed, v, bound);
}
bool RangeCheckEliminator::loop_invariant(BlockBegin *loop_header, Instruction *instruction) {
  assert(loop_header, "Loop header must not be null!");
  if (!instruction) return true;
  return instruction->dominator_depth() < loop_header->dominator_depth();
}
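// Pushes a new bound for v, and-ed with the current top so conditions from
// enclosing dominators are retained; the value id is appended to "pushed" so
// the bound can be popped when the dominated region is left.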
void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Bound *bound) {
  if (v->as_Constant()) {
    return;
  }
  if (!_bounds[v->id()]) {
    get_bound(v);
    assert(_bounds[v->id()], "Now Stack must exist");
  }
  Bound *top = NULL;
  if (_bounds[v->id()]->length() > 0) {
    top = _bounds[v->id()]->top();
  }
  if (top) {
    bound->and_op(top);
  }
  _bounds[v->id()]->push(bound);
  pushed.append(v->id());
}
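// Groups accesses by their base index instruction and tracks the constant
// offset range [_min, _max] seen so far; an access whose offset falls inside
// the existing range is already covered, so its range check is removed
// immediately.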
void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int idx, Value instruction, AccessIndexed *ai) {
  int id = instruction->id();
  AccessIndexedInfo *aii = _access_indexed_info[id];
  if (aii == NULL) {
    aii = new AccessIndexedInfo();
    _access_indexed_info[id] = aii;
    indices.append(instruction);
    aii->_min = idx;
    aii->_max = idx;
    aii->_list = new AccessIndexedList();
  } else if (idx >= aii->_min && idx <= aii->_max) {
    remove_range_check(ai);
    return;
  }
  aii->_min = MIN2(aii->_min, idx);
  aii->_max = MAX2(aii->_max, idx);
  aii->_list->append(ai);
}
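// In-block motion: within one block, an access of the same array whose
// constant index is covered by a larger already-seen constant is removed
// outright, as is a base + c access whose offset lies inside the group's
// known [_min, _max] range. In optimistic mode the survivors are covered by
// hoisted predicates: one check against the largest constant, and for each
// base + c group two checks that deoptimize when base + _max reaches the
// array length or base + _min is negative.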
void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays) {
  InstructionList indices;
  for (int i=0; i<arrays.length(); i++) {
    int max_constant = -1;
    AccessIndexedList list_constant;
    Value array = arrays.at(i);
    for(int j=0; j<accessIndexed.length(); j++) {
      AccessIndexed *ai = accessIndexed.at(j);
      if (ai->array() != array || !ai->check_flag(Instruction::NeedsRangeCheckFlag)) continue;
      Value index = ai->index();
      Constant *c = index->as_Constant();
      if (c != NULL) {
        int constant_value = c->type()->as_IntConstant()->value();
        if (constant_value >= 0) {
          if (constant_value <= max_constant) {
            remove_range_check(ai);
          } else {
            max_constant = constant_value;
            list_constant.append(ai);
          }
        }
      } else {
        int last_integer = 0;
        Instruction *last_instruction = index;
        int base = 0;
        ArithmeticOp *ao = index->as_ArithmeticOp();
        while (ao != NULL && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {
          c = ao->y()->as_Constant();
          Instruction *other = ao->x();
          if (!c && ao->op() == Bytecodes::_iadd) {
            c = ao->x()->as_Constant();
            other = ao->y();
          }
          if (c) {
            int value = c->type()->as_IntConstant()->value();
            if (value != min_jint) {
              if (ao->op() == Bytecodes::_isub) {
                value = -value;
              }
              base += value;
              last_integer = base;
              last_instruction = other;
            }
            index = other;
          } else {
            break;
          }
          ao = index->as_ArithmeticOp();
        }
        add_access_indexed_info(indices, last_integer, last_instruction, ai);
      }
    }
    if (_optimistic) {
      for (int i = 0; i < indices.length(); i++) {
        Instruction *index_instruction = indices.at(i);
        AccessIndexedInfo *info = _access_indexed_info[index_instruction->id()];
        assert(info != NULL, "Info must not be null");
        bool range_cond = (info->_max < 0 || info->_max + min_jint <= info->_min);
        if (info->_list->length() > 2 && range_cond) {
          AccessIndexed *first = info->_list->at(0);
          Instruction *insert_position = first->prev();
          assert(insert_position->next() == first, "prev was calculated");
          ValueStack *state = first->state_before();
          Constant *min_constant = NULL;
          if (info->_min != 0) {
            min_constant = new Constant(new IntConstant(info->_min));
            NOT_PRODUCT(min_constant->set_printable_bci(first->printable_bci()));
            insert_position = insert_position->insert_after(min_constant);
          }
          Constant *max_constant = NULL;
          if (info->_max != 0) {
            max_constant = new Constant(new IntConstant(info->_max));
            NOT_PRODUCT(max_constant->set_printable_bci(first->printable_bci()));
            insert_position = insert_position->insert_after(max_constant);
          }
          Value length_instr = first->length();
          if (!length_instr) {
            ArrayLength *length = new ArrayLength(array, first->state_before()->copy());
            length->set_exception_state(length->state_before());
            length->set_flag(Instruction::DeoptimizeOnException, true);
            insert_position = insert_position->insert_after_same_bci(length);
            length_instr = length;
          }
          Instruction *lower_compare = index_instruction;
          if (min_constant) {
            ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, min_constant, lower_compare, false, NULL);
            insert_position = insert_position->insert_after_same_bci(ao);
            lower_compare = ao;
          }
          Instruction *upper_compare = index_instruction;
          if (max_constant) {
            ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, max_constant, upper_compare, false, NULL);
            insert_position = insert_position->insert_after_same_bci(ao);
            upper_compare = ao;
          }
          int bci = NOT_PRODUCT(first->printable_bci()) PRODUCT_ONLY(-1);
          insert_position = predicate(upper_compare, Instruction::aeq, length_instr, state, insert_position, bci);
          insert_position = predicate_cmp_with_const(lower_compare, Instruction::leq, -1, state, insert_position);
          for (int j = 0; j<info->_list->length(); j++) {
            AccessIndexed *ai = info->_list->at(j);
            remove_range_check(ai);
          }
        }
      }
      if (list_constant.length() > 1) {
        AccessIndexed *first = list_constant.at(0);
        Instruction *insert_position = first->prev();
        ValueStack *state = first->state_before();
        Constant *constant = new Constant(new IntConstant(max_constant));
        NOT_PRODUCT(constant->set_printable_bci(first->printable_bci()));
        insert_position = insert_position->insert_after(constant);
        Instruction *compare_instr = constant;
        Value length_instr = first->length();
        if (!length_instr) {
          ArrayLength *length = new ArrayLength(array, state->copy());
          length->set_exception_state(length->state_before());
          length->set_flag(Instruction::DeoptimizeOnException, true);
          insert_position = insert_position->insert_after_same_bci(length);
          length_instr = length;
        }
        insert_position = predicate(compare_instr, Instruction::geq, length_instr, state, insert_position);
        for (int j = 0; j<list_constant.length(); j++) {
          AccessIndexed *ai = list_constant.at(j);
          remove_range_check(ai);
        }
      }
    }
    for (int i = 0; i < indices.length(); i++) {
      Instruction *index_instruction = indices.at(i);
      _access_indexed_info[index_instruction->id()] = NULL;
    }
    indices.clear();
  }
}
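// Recursively marks blocks whose dominator subtree contains no AccessIndexed
// instruction, so bound calculation can skip them entirely.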
bool RangeCheckEliminator::set_process_block_flags(BlockBegin *block) {
  Instruction *cur = block;
  bool process = false;
  while (cur) {
    process |= (cur->as_AccessIndexed() != NULL);
    cur = cur->next();
  }
  BlockList *dominates = block->dominates();
  for (int i=0; i<dominates->length(); i++) {
    BlockBegin *next = dominates->at(i);
    process |= set_process_block_flags(next);
  }
  if (!process) {
    block->set(BlockBegin::donot_eliminate_range_checks);
  }
  return process;
}
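// Static sanity checks before predicates are emitted: if a bound is derived
// from the length of the accessed array itself, the upper check is either
// redundant or, with a non-negative constant part, would deoptimize on every
// execution, in which case the transformation is refused.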
bool RangeCheckEliminator::is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper) {
  bool upper_check = true;
  assert(lower_instr || lower >= 0, "If no lower_instr present, lower must be greater 0");
  assert(!lower_instr || lower_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
  assert(!upper_instr || upper_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
  assert(array_instr, "Array instruction must exist");
  assert(array_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
  assert(!length_instr || length_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
  if (upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr) {
    if (upper >= 0) return false; // would always trigger a deopt
    upper_check = false;
  }
  if (lower_instr && lower_instr->as_ArrayLength() && lower_instr->as_ArrayLength()->array() == array_instr) {
    if (lower > 0) return false;
  }
  if (upper_check && upper_instr && upper_instr->type()->as_ObjectType() && upper_instr == array_instr) {
    return false;
  }
  return true;
}
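// Predicate builders: each materializes the needed constants and arithmetic
// in front of insert_position and appends a RangeCheckPredicate that
// triggers deoptimization when its condition holds.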
Instruction* RangeCheckEliminator::insert_after(Instruction* insert_position, Instruction* instr, int bci) {
  if (bci != -1) {
    NOT_PRODUCT(instr->set_printable_bci(bci));
    return insert_position->insert_after(instr);
  } else {
    return insert_position->insert_after_same_bci(instr);
  }
}
Instruction* RangeCheckEliminator::predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
  RangeCheckPredicate *deoptimize = new RangeCheckPredicate(left, cond, true, right, state->copy());
  return insert_after(insert_position, deoptimize, bci);
}
Instruction* RangeCheckEliminator::predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
  Constant *const_instr = new Constant(new IntConstant(constant));
  insert_position = insert_after(insert_position, const_instr, bci);
  return predicate(instr, cond, const_instr, state, insert_position);
}
Instruction* RangeCheckEliminator::predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
  Constant *constant = new Constant(new IntConstant(left_const));
  insert_position = insert_after(insert_position, constant, bci);
  ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, left, false, NULL);
  insert_position = insert_position->insert_after_same_bci(ao);
  return predicate(ao, cond, right, state, insert_position);
}
Instruction* RangeCheckEliminator::predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
  Constant *const_instr = new Constant(new IntConstant(constant));
  insert_position = insert_after(insert_position, const_instr, bci);
  return predicate_add(left, left_const, cond, const_instr, state, insert_position);
}
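// Materialize the predicates for one hoisted access. The lower bound yields
// "lower_instr < 0", "lower_instr + c < 0" or "lower_instr <= -(c+1)"
// depending on the sign of the constant c; the upper bound compares against
// the array length ("length <= upper" for constant bounds, "upper_instr (+ c)
// >= length" otherwise), loading the length first if it is not yet available.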
void RangeCheckEliminator::insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper, AccessIndexed *ai) {
  assert(is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, lower, upper_instr, upper), "should have been tested before");
  bool upper_check = !(upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr);
  int bci = NOT_PRODUCT(ai->printable_bci()) PRODUCT_ONLY(-1);
  if (lower_instr) {
    assert(!lower_instr->type()->as_ObjectType(), "Must not be object type");
    if (lower == 0) {
      insert_position = predicate_cmp_with_const(lower_instr, Instruction::lss, 0, state, insert_position, bci);
    } else if (lower > 0) {
      insert_position = predicate_add_cmp_with_const(lower_instr, lower, Instruction::lss, 0, state, insert_position, bci);
    } else {
      assert(lower < 0, "");
      lower++;
      lower = -lower;
      insert_position = predicate_cmp_with_const(lower_instr, Instruction::leq, lower, state, insert_position, bci);
    }
  }
  if (!upper_check) return;
  if (!length_instr) {
    ArrayLength *length = new ArrayLength(array_instr, state->copy());
    NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
    length->set_exception_state(length->state_before());
    length->set_flag(Instruction::DeoptimizeOnException, true);
    insert_position = insert_position->insert_after(length);
    length_instr = length;
  }
  if (!upper_instr) {
    insert_position = predicate_cmp_with_const(length_instr, Instruction::leq, upper, state, insert_position, bci);
  } else {
    if (upper_instr->type()->as_ObjectType()) {
      assert(state, "must not be null");
      assert(upper_instr != array_instr, "should be");
      ArrayLength *length = new ArrayLength(upper_instr, state->copy());
      NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
      length->set_flag(Instruction::DeoptimizeOnException, true);
      length->set_exception_state(length->state_before());
      insert_position = insert_position->insert_after(length);
      upper_instr = length;
    }
    assert(upper_instr->type()->as_IntType(), "Must be int type!");
    if (upper == 0) {
      insert_position = predicate(upper_instr, Instruction::geq, length_instr, state, insert_position, bci);
    } else if (upper < 0) {
      insert_position = predicate_add(upper_instr, upper, Instruction::geq, length_instr, state, insert_position, bci);
    } else {
      assert(upper > 0, "");
      upper = -upper;
      insert_position = predicate_add(length_instr, upper, Instruction::leq, upper_instr, state, insert_position, bci);
    }
  }
}
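// Derive a bound on 'y' from a dominating condition "x cond y". 'x' is
// decomposed into an (instruction, constant) pair, also handling "instr + c"
// and "instr - c" shapes (a subtraction is folded as adding -c, guarding
// against negating min_jint); constant 'y' values record nothing.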
void RangeCheckEliminator::add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition) {
  if (y->as_Constant()) return;
  int const_value = 0;
  Value instr_value = x;
  Constant *c = x->as_Constant();
  ArithmeticOp *ao = x->as_ArithmeticOp();
  if (c != NULL) {
    const_value = c->type()->as_IntConstant()->value();
    instr_value = NULL;
  } else if (ao != NULL &&  (!ao->x()->as_Constant() || !ao->y()->as_Constant()) && ((ao->op() == Bytecodes::_isub && ao->y()->as_Constant()) || ao->op() == Bytecodes::_iadd)) {
    assert(!ao->x()->as_Constant() || !ao->y()->as_Constant(), "At least one operand must be non-constant!");
    assert(ao->op() == Bytecodes::_isub || ao->op() == Bytecodes::_iadd, "Operation has to be add or sub!");
    c = ao->x()->as_Constant();
    if (c != NULL) {
      const_value = c->type()->as_IntConstant()->value();
      instr_value = ao->y();
    } else {
      c = ao->y()->as_Constant();
      if (c != NULL) {
        const_value = c->type()->as_IntConstant()->value();
        instr_value = ao->x();
      }
    }
    if (ao->op() == Bytecodes::_isub) {
      assert(ao->y()->as_Constant(), "1 - x not supported, only x - 1 is valid!");
      if (const_value > min_jint) {
        const_value = -const_value;
      } else {
        const_value = 0;
        instr_value = x;
      }
    }
  }
  update_bound(pushed, y, condition, instr_value, const_value);
}
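// When 'block' is reached through exactly one successor of the dominating If,
// that condition (negated for the false branch) is known to hold on entry.
// Both orientations of an integer comparison are pushed onto the bound stacks.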
void RangeCheckEliminator::process_if(IntegerStack &pushed, BlockBegin *block, If *cond) {
  if ((cond->tsux() == block || cond->fsux() == block) && cond->tsux() != cond->fsux()) {
    Instruction::Condition condition = cond->cond();
    if (cond->fsux() == block) {
      condition = Instruction::negate(condition);
    }
    Value x = cond->x();
    Value y = cond->y();
    if (x->type()->as_IntType() && y->type()->as_IntType()) {
      add_if_condition(pushed, y, x, condition);
      add_if_condition(pushed, x, y, Instruction::mirror(condition));
    }
  }
}
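// Decide what to do with one range-checked access: drop the check when the
// index bound is already known to fit the array; otherwise, in optimistic
// mode, hoist predicates for a fully loop-invariant access in front of the
// loop header's dominator and drop the check afterwards.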
void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai) {
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->fill_to(block->dominator_depth()*2)
  );
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->print_cr("Access indexed: index=%d length=%d", ai->index()->id(), (ai->length() != NULL ? ai->length()->id() :-1 ))
  );
  if (ai->check_flag(Instruction::NeedsRangeCheckFlag)) {
    Bound *index_bound = get_bound(ai->index());
    if (!index_bound->has_lower() || !index_bound->has_upper()) {
      TRACE_RANGE_CHECK_ELIMINATION(
        tty->fill_to(block->dominator_depth()*2);
        tty->print_cr("Index instruction %d has no lower and/or no upper bound!", ai->index()->id())
      );
      return;
    }
    Bound *array_bound;
    if (ai->length()) {
      array_bound = get_bound(ai->length());
    } else {
      array_bound = get_bound(ai->array());
    }
    if (in_array_bound(index_bound, ai->array()) ||
      (index_bound && array_bound && index_bound->is_smaller(array_bound) && !index_bound->lower_instr() && index_bound->lower() >= 0)) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Bounds check for instruction %d in block B%d can be fully eliminated!", ai->id(), ai->block()->block_id())
        );
        remove_range_check(ai);
    } else if (_optimistic && loop_header) {
      assert(ai->array(), "Array must not be null!");
      assert(ai->index(), "Index must not be null!");
      Instruction *array_instr = ai->array();
      if (!loop_invariant(loop_header, array_instr)) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Array %d is not loop invariant to header B%d", ai->array()->id(), loop_header->block_id())
        );
        return;
      }
      Value index_instr = ai->index();
      Value lower_instr = index_bound->lower_instr();
      if (!loop_invariant(loop_header, lower_instr)) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Lower instruction %d not loop invariant!", lower_instr->id())
        );
        return;
      }
      if (!lower_instr && index_bound->lower() < 0) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Lower bound smaller than 0 (%d)!", index_bound->lower())
        );
        return;
      }
      Value upper_instr = index_bound->upper_instr();
      if (!loop_invariant(loop_header, upper_instr)) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Upper instruction %d not loop invariant!", upper_instr->id())
        );
        return;
      }
      Value length_instr = ai->length();
      if (!loop_invariant(loop_header, length_instr)) {
        length_instr = NULL;
      }
      TRACE_RANGE_CHECK_ELIMINATION(
        tty->fill_to(block->dominator_depth()*2);
        tty->print_cr("LOOP INVARIANT access indexed %d found in block B%d!", ai->id(), ai->block()->block_id())
      );
      BlockBegin *pred_block = loop_header->dominator();
      assert(pred_block != NULL, "Every loop header has a dominator!");
      BlockEnd *pred_block_end = pred_block->end();
      Instruction *insert_position = pred_block_end->prev();
      ValueStack *state = pred_block_end->state_before();
      if (pred_block_end->as_Goto() && state == NULL) state = pred_block_end->state();
      assert(state, "State must not be null");
      TRACE_RANGE_CHECK_ELIMINATION(
        tty->fill_to(block->dominator_depth()*2);
        tty->print_cr("Inserting deopt at bci %d in block B%d!", state->bci(), insert_position->block()->block_id())
      );
      if (!is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper())) {
        TRACE_RANGE_CHECK_ELIMINATION(
          tty->fill_to(block->dominator_depth()*2);
          tty->print_cr("Could not eliminate because of static analysis!")
        );
        return;
      }
      insert_deoptimization(state, insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper(), ai);
      remove_range_check(ai);
    }
  }
}
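// Mark the access as no longer needing its range check and drop the length
// input. Under ASSERT_RANGE_CHECK_ELIMINATION, Assert instructions encoding
// 0 <= index <= length - 1 are appended so the claim is still checked at
// runtime in debug builds.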
void RangeCheckEliminator::remove_range_check(AccessIndexed *ai) {
  ai->set_flag(Instruction::NeedsRangeCheckFlag, false);
  ai->clear_length();
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->fill_to(ai->dominator_depth()*2);
    tty->print_cr("Range check for instruction %d eliminated!", ai->id());
  );
  ASSERT_RANGE_CHECK_ELIMINATION(
    Value array_length = ai->length();
    if (!array_length) {
      array_length = ai->array();
      assert(array_length->type()->as_ObjectType(), "Has to be object type!");
    }
    int cur_constant = -1;
    Value cur_value = array_length;
    if (cur_value->type()->as_IntConstant()) {
      cur_constant += cur_value->type()->as_IntConstant()->value();
      cur_value = NULL;
    }
    Bound *new_index_bound = new Bound(0, NULL, cur_constant, cur_value);
    add_assertions(new_index_bound, ai->index(), ai);
  );
}
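// Main recursion over the dominator tree: push the bounds implied by the
// dominating If, process and record every indexed access in the block, run
// in-block motion, recurse into dominated blocks (tracking the innermost
// loop header) and finally pop everything pushed for this block.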
void RangeCheckEliminator::calc_bounds(BlockBegin *block, BlockBegin *loop_header) {
  assert(!loop_header || loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Loop header has to be real!");
  TRACE_RANGE_CHECK_ELIMINATION(
    tty->fill_to(block->dominator_depth()*2);
    tty->print_cr("Block B%d", block->block_id());
  );
  IntegerStack pushed;
  BlockBegin *parent = block->dominator();
  if (parent != NULL) {
    If *cond = parent->end()->as_If();
    if (cond != NULL) {
      process_if(pushed, block, cond);
    }
  }
  InstructionList arrays;
  AccessIndexedList accessIndexed;
  Instruction *cur = block;
  while (cur) {
    if (cur->id() < this->_bounds.length()) {
      AccessIndexed *ai = cur->as_AccessIndexed();
      if (ai != NULL) {
        process_access_indexed(loop_header, block, ai);
        accessIndexed.append(ai);
        if (!arrays.contains(ai->array())) {
          arrays.append(ai->array());
        }
        Bound *b = get_bound(ai->index());
        if (!b->lower_instr()) {
          update_bound(pushed, ai->index(), Instruction::geq, NULL, 0);
        }
        if (!b->has_upper()) {
          if (ai->length() && ai->length()->type()->as_IntConstant()) {
            int value = ai->length()->type()->as_IntConstant()->value();
            update_bound(pushed, ai->index(), Instruction::lss, NULL, value);
          } else {
            Instruction *instr = ai->length();
            if (instr == NULL) instr = ai->array();
            update_bound(pushed, ai->index(), Instruction::lss, instr, 0);
          }
        }
      }
    }
    cur = cur->next();
  }
  TRACE_RANGE_CHECK_ELIMINATION(dump_condition_stack(block));
  in_block_motion(block, accessIndexed, arrays);
  for (int i=0; i<block->dominates()->length(); i++) {
    BlockBegin *next = block->dominates()->at(i);
    if (!next->is_set(BlockBegin::donot_eliminate_range_checks)) {
      if (block->is_set(BlockBegin::linear_scan_loop_header_flag) && (block->loop_index() == next->loop_index() || next->loop_depth() > block->loop_depth())) {
        calc_bounds(next, block);
      } else {
        calc_bounds(next, loop_header);
      }
    }
  }
  for (int i=0; i<pushed.length(); i++) {
    _bounds[pushed[i]]->pop();
  }
}
#ifndef PRODUCT
void RangeCheckEliminator::dump_condition_stack(BlockBegin *block) {
  for (int i=0; i<_ir->linear_scan_order()->length(); i++) {
    BlockBegin *cur_block = _ir->linear_scan_order()->at(i);
    Instruction *instr = cur_block;
    for_each_phi_fun(cur_block, phi,
                     BoundStack *bound_stack = _bounds.at(phi->id());
                     if (bound_stack && bound_stack->length() > 0) {
                       Bound *bound = bound_stack->top();
                       if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != phi || bound->upper_instr() != phi || bound->lower() != 0 || bound->upper() != 0)) {
                           TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
                                                         tty->print("i%d", phi->id());
                                                         tty->print(": ");
                                                         bound->print();
                                                         tty->cr();
                           );
                         }
                     });
    while (!instr->as_BlockEnd()) {
      if (instr->id() < _bounds.length()) {
        BoundStack *bound_stack = _bounds.at(instr->id());
        if (bound_stack && bound_stack->length() > 0) {
          Bound *bound = bound_stack->top();
          if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != instr || bound->upper_instr() != instr || bound->lower() != 0 || bound->upper() != 0)) {
              TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
                                            tty->print("i%d", instr->id());
                                            tty->print(": ");
                                            bound->print();
                                            tty->cr();
              );
          }
        }
      }
      instr = instr->next();
    }
  }
}
#endif
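// Post-pass IR sanity checks: walks the blocks in linear scan order and
// verifies split critical edges, dominator relations, loop-header invariants
// and the per-instruction block links.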
RangeCheckEliminator::Verification::Verification(IR *ir) : _used(BlockBegin::number_of_blocks(), false) {
  this->_ir = ir;
  ir->iterate_linear_scan_order(this);
}
void RangeCheckEliminator::Verification::block_do(BlockBegin *block) {
  If *cond = block->end()->as_If();
  if (block->number_of_sux() > 1) {
    for (int i=0; i<block->number_of_sux(); i++) {
      BlockBegin *sux = block->sux_at(i);
      BlockBegin *pred = NULL;
      for (int j=0; j<sux->number_of_preds(); j++) {
        BlockBegin *cur = sux->pred_at(j);
        assert(cur != NULL, "Predecessor must not be null");
        if (!pred) {
          pred = cur;
        }
        assert(cur == pred, "Block must not have more than one predecessor if its predecessor has more than one successor");
      }
      assert(sux->number_of_preds() >= 1, "Block must have at least one predecessor");
      assert(sux->pred_at(0) == block, "Wrong successor");
    }
  }
  BlockBegin *dominator = block->dominator();
  if (dominator) {
    assert(block != _ir->start(), "Start block must not have a dominator!");
    assert(can_reach(dominator, block), "Dominator can't reach its block!");
    assert(can_reach(_ir->start(), dominator), "Dominator is unreachable!");
    assert(!can_reach(_ir->start(), block, dominator), "Wrong dominator! Block is reachable without passing through its dominator!");
    BlockList *all_blocks = _ir->linear_scan_order();
    for (int i=0; i<all_blocks->length(); i++) {
      BlockBegin *cur = all_blocks->at(i);
      if (cur != dominator && cur != block) {
        assert(can_reach(dominator, block, cur), "There has to be another dominator!");
      }
    }
  } else {
    assert(block == _ir->start(), "Only start block must not have a dominator");
  }
  if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
    int loop_index = block->loop_index();
    BlockList *all_blocks = _ir->linear_scan_order();
    assert(block->number_of_preds() >= 1, "Block must have at least one predecessor");
    assert(!block->is_set(BlockBegin::exception_entry_flag), "Loop header must not be exception handler!");
    bool loop_through_xhandler = false;
    for (int i = 0; i < block->number_of_exception_handlers(); i++) {
      BlockBegin *xhandler = block->exception_handler_at(i);
      for (int j = 0; j < block->number_of_preds(); j++) {
        if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {
          loop_through_xhandler = true;
        }
      }
    }
    for (int i=0; i<block->number_of_sux(); i++) {
      BlockBegin *sux = block->sux_at(i);
      assert(sux->loop_depth() != block->loop_depth() || sux->loop_index() == block->loop_index() || loop_through_xhandler, "Loop index has to be the same");
      assert(sux->loop_depth() == block->loop_depth() || sux->loop_index() != block->loop_index(), "Loop indexes have to be different");
    }
    for (int i=0; i<all_blocks->length(); i++) {
      BlockBegin *cur = all_blocks->at(i);
      if (cur->loop_index() == loop_index && cur != block) {
        assert(dominates(block->dominator(), cur), "Dominator of loop header must dominate all loop blocks");
      }
    }
  }
  Instruction *cur = block;
  while (cur) {
    assert(cur->block() == block, "Block begin has to be set correctly!");
    cur = cur->next();
  }
}
bool RangeCheckEliminator::Verification::dominates(BlockBegin *dominator, BlockBegin *block) {
  BlockBegin *cur = block->dominator();
  while (cur && cur != dominator) {
    cur = cur->dominator();
  }
  return cur == dominator;
}
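// Iterative worklist search from 'start' to 'end' over normal successors and
// exception handlers (including handlers of handlers), never stepping onto
// 'dont_use'. Used by block_do() to validate the dominator information.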
bool RangeCheckEliminator::Verification::can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use /* = NULL */) {
  if (start == end) return start != dont_use;
  for (int i=0; i<_used.length(); i++) {
    _used[i] = false;
  }
  _current.truncate(0);
  _successors.truncate(0);
  if (start != dont_use) {
    _current.push(start);
    _used[start->block_id()] = true;
  }
  while (_current.length() > 0) {
    BlockBegin *cur = _current.pop();
    for (int i=0; i<cur->number_of_exception_handlers(); i++) {
      BlockBegin *xhandler = cur->exception_handler_at(i);
      _successors.push(xhandler);
      for (int j=0; j<xhandler->number_of_exception_handlers(); j++) {
        BlockBegin *sux_xhandler = xhandler->exception_handler_at(j);
        _successors.push(sux_xhandler);
      }
    }
    for (int i=0; i<cur->number_of_sux(); i++) {
      BlockBegin *sux = cur->sux_at(i);
      _successors.push(sux);
      for (int j=0; j<sux->number_of_exception_handlers(); j++) {
        BlockBegin *xhandler = sux->exception_handler_at(j);
        _successors.push(xhandler);
      }
    }
    for (int i=0; i<_successors.length(); i++) {
      BlockBegin *sux = _successors[i];
      assert(sux != NULL, "Successor must not be NULL!");
      if (sux == end) {
        return true;
      }
      if (sux != dont_use && !_used[sux->block_id()]) {
        _used[sux->block_id()] = true;
        _current.push(sux);
      }
    }
    _successors.truncate(0);
  }
  return false;
}
RangeCheckEliminator::Bound::~Bound() {
}
RangeCheckEliminator::Bound::Bound() {
  init();
  this->_lower = min_jint;
  this->_upper = max_jint;
  this->_lower_instr = NULL;
  this->_upper_instr = NULL;
}
RangeCheckEliminator::Bound::Bound(int lower, Value lower_instr, int upper, Value upper_instr) {
  init();
  assert(!lower_instr || !lower_instr->as_Constant() || !lower_instr->type()->as_IntConstant(), "Must not be constant!");
  assert(!upper_instr || !upper_instr->as_Constant() || !upper_instr->type()->as_IntConstant(), "Must not be constant!");
  this->_lower = lower;
  this->_upper = upper;
  this->_lower_instr = lower_instr;
  this->_upper_instr = upper_instr;
}
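// Construct a bound from the relation "bounded value cond v + constant":
// eql pins both ends, geq/leq pin one end, and neq contributes nothing
// unless the excluded constant is exactly min_jint or max_jint.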
RangeCheckEliminator::Bound::Bound(Instruction::Condition cond, Value v, int constant) {
  assert(!v || (v->type() && (v->type()->as_IntType() || v->type()->as_ObjectType())), "Type must be array or integer!");
  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
  init();
  if (cond == Instruction::eql) {
    _lower = constant;
    _lower_instr = v;
    _upper = constant;
    _upper_instr = v;
  } else if (cond == Instruction::neq) {
    _lower = min_jint;
    _upper = max_jint;
    _lower_instr = NULL;
    _upper_instr = NULL;
    if (v == NULL) {
      if (constant == min_jint) {
        _lower++;
      }
      if (constant == max_jint) {
        _upper--;
      }
    }
  } else if (cond == Instruction::geq) {
    _lower = constant;
    _lower_instr = v;
    _upper = max_jint;
    _upper_instr = NULL;
  } else if (cond == Instruction::leq) {
    _lower = min_jint;
    _lower_instr = NULL;
    _upper = constant;
    _upper_instr = v;
  } else {
    ShouldNotReachHere();
  }
}
void RangeCheckEliminator::Bound::set_lower(int value, Value v) {
  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
  this->_lower = value;
  this->_lower_instr = v;
}
void RangeCheckEliminator::Bound::set_upper(int value, Value v) {
  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
  this->_upper = value;
  this->_upper_instr = v;
}
void RangeCheckEliminator::Bound::add_constant(int value) {
  this->_lower += value;
  this->_upper += value;
}
void RangeCheckEliminator::Bound::init() {
}
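// Widen this bound so it covers both alternatives of a control-flow join:
// ends whose instruction parts (or their constants) disagree collapse to
// min_jint/max_jint, otherwise the weaker constant limit is kept.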
void RangeCheckEliminator::Bound::or_op(Bound *b) {
  if (_lower_instr != b->_lower_instr || (_lower_instr && _lower != b->_lower)) {
    _lower_instr = NULL;
    _lower = min_jint;
  } else {
    _lower = MIN2(_lower, b->_lower);
  }
  if (_upper_instr != b->_upper_instr || (_upper_instr && _upper != b->_upper)) {
    _upper_instr = NULL;
    _upper = max_jint;
  } else {
    _upper = MAX2(_upper, b->_upper);
  }
}
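// Intersect this bound with 'b' (both conditions hold at once): matching
// instruction parts take the stronger constant; when both ends carry
// different instructions, the one defined at the smaller dominator depth
// wins, so the result stays available wherever the original bound was.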
void RangeCheckEliminator::Bound::and_op(Bound *b) {
  if (_lower_instr == b->_lower_instr) {
    _lower = MAX2(_lower, b->_lower);
  }
  if (b->has_lower()) {
    bool set = true;
    if (_lower_instr != NULL && b->_lower_instr != NULL) {
      set = (_lower_instr->dominator_depth() > b->_lower_instr->dominator_depth());
    }
    if (set) {
      _lower = b->_lower;
      _lower_instr = b->_lower_instr;
    }
  }
  if (_upper_instr == b->_upper_instr) {
    _upper = MIN2(_upper, b->_upper);
  }
  if (b->has_upper()) {
    bool set = true;
    if (_upper_instr != NULL && b->_upper_instr != NULL) {
      set = (_upper_instr->dominator_depth() > b->_upper_instr->dominator_depth());
    }
    if (set) {
      _upper = b->_upper;
      _upper_instr = b->_upper_instr;
    }
  }
}
bool RangeCheckEliminator::Bound::has_upper() {
  return _upper_instr != NULL || _upper < max_jint;
}
bool RangeCheckEliminator::Bound::is_smaller(Bound *b) {
  if (b->_lower_instr != _upper_instr) {
    return false;
  }
  return _upper < b->_lower;
}
bool RangeCheckEliminator::Bound::has_lower() {
  return _lower_instr != NULL || _lower > min_jint;
}
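// A bound proves an access in range only if it has the exact shape
// 0 <= index <= length - c with c >= 1, where the upper instruction is the
// array's ArrayLength (or the array object standing in for its length).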
bool RangeCheckEliminator::in_array_bound(Bound *bound, Value array) {
  if (!bound) return false;
  assert(array != NULL, "Must not be null!");
  assert(bound != NULL, "Must not be null!");
  if (bound->lower() >= 0 && bound->lower_instr() == NULL && bound->upper() < 0 && bound->upper_instr() != NULL) {
    ArrayLength *len = bound->upper_instr()->as_ArrayLength();
    if (bound->upper_instr() == array || (len != NULL && len->array() == array)) {
      return true;
    }
  }
  return false;
}
void RangeCheckEliminator::Bound::remove_lower() {
  _lower = min_jint;
  _lower_instr = NULL;
}
void RangeCheckEliminator::Bound::remove_upper() {
  _upper = max_jint;
  _upper_instr = NULL;
}
int RangeCheckEliminator::Bound::upper() {
  return _upper;
}
int RangeCheckEliminator::Bound::lower() {
  return _lower;
}
Value RangeCheckEliminator::Bound::upper_instr() {
  return _upper_instr;
}
Value RangeCheckEliminator::Bound::lower_instr() {
  return _lower_instr;
}
void RangeCheckEliminator::Bound::print() {
  tty->print("%s", "");
  if (this->_lower_instr || this->_lower != min_jint) {
    if (this->_lower_instr) {
      tty->print("i%d", this->_lower_instr->id());
      if (this->_lower > 0) {
        tty->print("+%d", _lower);
      }
      if (this->_lower < 0) {
        tty->print("%d", _lower);
      }
    } else {
      tty->print("%d", _lower);
    }
    tty->print(" <= ");
  }
  tty->print("x");
  if (this->_upper_instr || this->_upper != max_jint) {
    tty->print(" <= ");
    if (this->_upper_instr) {
      tty->print("i%d", this->_upper_instr->id());
      if (this->_upper > 0) {
        tty->print("+%d", _upper);
      }
      if (this->_upper < 0) {
        tty->print("%d", _upper);
      }
    } else {
      tty->print("%d", _upper);
    }
  }
}
RangeCheckEliminator::Bound *RangeCheckEliminator::Bound::copy() {
  Bound *b = new Bound();
  b->_lower = _lower;
  b->_lower_instr = _lower_instr;
  b->_upper = _upper;
  b->_upper_instr = _upper_instr;
  return b;
}
#ifdef ASSERT
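// Debug-only helper: build "instr + i" (loading the array length when 'instr'
// is object-typed), likewise reduce 'instruction' to its length if it is an
// array, and insert an Assert node checking "instruction cond instr + i" just
// before 'position' (stepping over the MonitorExit of a synchronized return).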
void RangeCheckEliminator::Bound::add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond) {
  Instruction *result = position;
  Instruction *compare_with = NULL;
  ValueStack *state = position->state_before();
  if (position->as_BlockEnd() && !position->as_Goto()) {
    state = position->as_BlockEnd()->state_before();
  }
  Instruction *instruction_before = position->prev();
  if (position->as_Return() && Compilation::current()->method()->is_synchronized() && instruction_before->as_MonitorExit()) {
    instruction_before = instruction_before->prev();
  }
  result = instruction_before;
  Constant *constant = NULL;
  if (i != 0 || !instr) {
    constant = new Constant(new IntConstant(i));
    NOT_PRODUCT(constant->set_printable_bci(position->printable_bci()));
    result = result->insert_after(constant);
    compare_with = constant;
  }
  if (instr) {
    assert(instr->type()->as_ObjectType() || instr->type()->as_IntType(), "Type must be array or integer!");
    compare_with = instr;
    Instruction *op = instr;
    if (instr->type()->as_ObjectType()) {
      assert(state, "must not be null");
      ArrayLength *length = new ArrayLength(instr, state->copy());
      NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
      length->set_exception_state(length->state_before());
      result = result->insert_after(length);
      op = length;
      compare_with = length;
    }
    if (constant) {
      ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, op, false, NULL);
      NOT_PRODUCT(ao->set_printable_bci(position->printable_bci()));
      result = result->insert_after(ao);
      compare_with = ao;
    }
  }
  assert(compare_with != NULL, "You have to compare with something!");
  assert(instruction != NULL, "Instruction must not be null!");
  if (instruction->type()->as_ObjectType()) {
    Instruction *op = instruction;
    assert(state, "must not be null");
    ArrayLength *length = new ArrayLength(instruction, state->copy());
    length->set_exception_state(length->state_before());
    NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
    result = result->insert_after(length);
    instruction = length;
  }
  Assert *assert = new Assert(instruction, cond, false, compare_with);
  NOT_PRODUCT(assert->set_printable_bci(position->printable_bci()));
  result->insert_after(assert);
}
void RangeCheckEliminator::add_assertions(Bound *bound, Instruction *instruction, Instruction *position) {
  if (bound->has_lower()) {
    bound->add_assertion(instruction, position, bound->lower(), bound->lower_instr(), Instruction::geq);
  }
  if (bound->has_upper()) {
    bound->add_assertion(instruction, position, bound->upper(), bound->upper_instr(), Instruction::leq);
  }
}
#endif
C:\hotspot-69087d08d473\src\share\vm/c1/c1_RangeCheckElimination.hpp
#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#include "c1/c1_Instruction.hpp"
class RangeCheckElimination : AllStatic {
public:
  static void eliminate(IR *ir);
};
class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
private:
  int _number_of_instructions;
  bool _optimistic; // Insert predicates and deoptimize when they fail
  IR *_ir;
  define_array(BlockBeginArray, BlockBegin*)
  define_stack(BlockBeginList, BlockBeginArray)
  define_stack(IntegerStack, intArray)
  define_array(IntegerMap, IntegerStack*)
  class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
  private:
    IR *_ir;
    boolArray _used;
    BlockBeginList _current;
    BlockBeginList _successors;
  public:
    Verification(IR *ir);
    virtual void block_do(BlockBegin *block);
    bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
    bool dominates(BlockBegin *dominator, BlockBegin *block);
  };
public:
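  // Symbolic interval for an int value: each end is "instr + constant", where
  // a NULL instr leaves a plain constant and min_jint/max_jint mark an
  // unknown end (see has_lower()/has_upper()).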
  class Bound : public CompilationResourceObj {
  private:
    int _upper;
    Value _upper_instr;
    int _lower;
    Value _lower_instr;
  public:
    Bound();
    Bound(Value v);
    Bound(Instruction::Condition cond, Value v, int constant = 0);
    Bound(int lower, Value lower_instr, int upper, Value upper_instr);
    ~Bound();
#ifdef ASSERT
    void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
#endif
    int upper();
    Value upper_instr();
    int lower();
    Value lower_instr();
    void print();
    bool check_no_overflow(int const_value);
    void or_op(Bound *b);
    void and_op(Bound *b);
    bool has_upper();
    bool has_lower();
    void set_upper(int upper, Value upper_instr);
    void set_lower(int lower, Value lower_instr);
    bool is_smaller(Bound *b);
    void remove_upper();
    void remove_lower();
    void add_constant(int value);
    Bound *copy();
  private:
    void init();
  };
  class Visitor : public InstructionVisitor {
  private:
    Bound *_bound;
    RangeCheckEliminator *_rce;
  public:
    void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
    Bound *bound() const { return _bound; }
    void clear_bound() { _bound = NULL; }
  protected:
    void do_Constant       (Constant*        x);
    void do_IfOp           (IfOp*            x);
    void do_LogicOp        (LogicOp*         x);
    void do_ArithmeticOp   (ArithmeticOp*    x);
    void do_Phi            (Phi*             x);
    void do_StoreField     (StoreField*      x) { /* nothing to do */ };
    void do_StoreIndexed   (StoreIndexed*    x) { /* nothing to do */ };
    void do_MonitorEnter   (MonitorEnter*    x) { /* nothing to do */ };
    void do_MonitorExit    (MonitorExit*     x) { /* nothing to do */ };
    void do_Invoke         (Invoke*          x) { /* nothing to do */ };
    void do_UnsafePutRaw   (UnsafePutRaw*    x) { /* nothing to do */ };
    void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
    void do_Intrinsic      (Intrinsic*       x) { /* nothing to do */ };
    void do_Local          (Local*           x) { /* nothing to do */ };
    void do_LoadField      (LoadField*       x) { /* nothing to do */ };
    void do_ArrayLength    (ArrayLength*     x) { /* nothing to do */ };
    void do_LoadIndexed    (LoadIndexed*     x) { /* nothing to do */ };
    void do_NegateOp       (NegateOp*        x) { /* nothing to do */ };
    void do_ShiftOp        (ShiftOp*         x) { /* nothing to do */ };
    void do_CompareOp      (CompareOp*       x) { /* nothing to do */ };
    void do_Convert        (Convert*         x) { /* nothing to do */ };
    void do_NullCheck      (NullCheck*       x) { /* nothing to do */ };
    void do_TypeCast       (TypeCast*        x) { /* nothing to do */ };
    void do_NewInstance    (NewInstance*     x) { /* nothing to do */ };
    void do_NewTypeArray   (NewTypeArray*    x) { /* nothing to do */ };
    void do_NewObjectArray (NewObjectArray*  x) { /* nothing to do */ };
    void do_NewMultiArray  (NewMultiArray*   x) { /* nothing to do */ };
    void do_CheckCast      (CheckCast*       x) { /* nothing to do */ };
    void do_InstanceOf     (InstanceOf*      x) { /* nothing to do */ };
    void do_BlockBegin     (BlockBegin*      x) { /* nothing to do */ };
    void do_Goto           (Goto*            x) { /* nothing to do */ };
    void do_If             (If*              x) { /* nothing to do */ };
    void do_IfInstanceOf   (IfInstanceOf*    x) { /* nothing to do */ };
    void do_TableSwitch    (TableSwitch*     x) { /* nothing to do */ };
    void do_LookupSwitch   (LookupSwitch*    x) { /* nothing to do */ };
    void do_Return         (Return*          x) { /* nothing to do */ };
    void do_Throw          (Throw*           x) { /* nothing to do */ };
    void do_Base           (Base*            x) { /* nothing to do */ };
    void do_OsrEntry       (OsrEntry*        x) { /* nothing to do */ };
    void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
    void do_RoundFP        (RoundFP*         x) { /* nothing to do */ };
    void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ };
    void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
    void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
#ifdef ASSERT
    void do_Assert         (Assert*          x) { /* nothing to do */ };
#endif
  };
#ifdef ASSERT
  void add_assertions(Bound *bound, Instruction *instruction, Instruction *position);
#endif
  define_array(BoundArray, Bound *)
  define_stack(BoundStack, BoundArray)
  define_array(BoundMap, BoundStack *)
  define_array(AccessIndexedArray, AccessIndexed *)
  define_stack(AccessIndexedList, AccessIndexedArray)
  define_array(InstructionArray, Instruction *)
  define_stack(InstructionList, InstructionArray)
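  // Per-index bookkeeping for in-block motion: the accesses sharing one index
  // instruction together with the smallest and largest constant offsets seen
  // (maintained via add_access_indexed_info()).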
  class AccessIndexedInfo : public CompilationResourceObj  {
  public:
    AccessIndexedList *_list;
    int _min;
    int _max;
  };
  define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
  BoundMap _bounds; // Mapping from Instruction's id to current bound
  AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
  Visitor _visitor;
public:
  RangeCheckEliminator(IR *ir);
  IR *ir() const { return _ir; }
  bool set_process_block_flags(BlockBegin *block);
  void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
  void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays);
  void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
  void update_bound(IntegerStack &pushed, Value v, Bound *bound);
  Bound *get_bound(Value v);
  bool loop_invariant(BlockBegin *loop_header, Instruction *instruction);                                    // check for loop invariance
  void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai); // record indexed access for in block motion
  void remove_range_check(AccessIndexed *ai);                                                                // Mark this instruction as not needing a range check
  void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition);           // Update bound for an If
  bool in_array_bound(Bound *bound, Value array);                                                            // Check whether bound is known to fall within array
  Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
  Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
  Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
  Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
  Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
  void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr,      // Add predicate
                             Instruction *length_instruction, Instruction *lower_instr, int lower,
                             Instruction *upper_instr, int upper, AccessIndexed *ai);
  bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr,                      // Can we safely add a predicate?
                                Instruction *length_instr, Instruction *lower_instr,
                                int lower, Instruction *upper_instr, int upper);
  void process_if(IntegerStack &pushed, BlockBegin *block, If *cond);                                        // process If Instruction
  void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai);                // process indexed access
  void dump_condition_stack(BlockBegin *cur_block);
  static void print_statistics();
};
#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
C:\hotspot-69087d08d473\src\share\vm/c1/c1_Runtime1.cpp
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/barrierSet.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}
void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}
void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}
void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};
#ifndef PRODUCT
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_primitive_arraycopy_cnt = 0;
int Runtime1::_oop_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;
static int _byte_arraycopy_cnt = 0;
static int _short_arraycopy_cnt = 0;
static int _int_arraycopy_cnt = 0;
static int _long_arraycopy_cnt = 0;
static int _oop_arraycopy_cnt = 0;
address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}
#endif
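// Helpers for the slow-path entries below: caller_is_deopted() tests whether
// the compiled caller frame has already been deoptimized, and deopt_caller()
// forces its deoptimization if it has not (used e.g. under DeoptimizeALot).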
static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}
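// Assemble one Runtime1 stub into 'buffer_blob', wrap it as a RuntimeStub
// with its oop maps and frame size, and register it in _blobs. Only the
// stubs listed in the ASSERT switch below may legitimately lack an oop map.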
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  CodeBuffer code(buffer_blob);
  Compilation::setup_code_buffer(&code, 0);
  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
  OopMapSet* oop_maps;
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
#ifdef ASSERT
  switch (id) {
    case dtrace_object_alloc_id:
    case g1_pre_barrier_slow_id:
    case g1_post_barrier_slow_id:
    case slow_subtype_check_id:
    case fpu2long_stub_id:
    case unwind_exception_id:
    case counter_overflow_id:
#if defined(SPARC) || defined(PPC)
    case handle_exception_nofpu_id:  // Unused on sparc
#endif
      break;
    default:
      assert(oop_maps != NULL, "must have an oopmap");
  }
#endif
  sasm->align(BytesPerWord);
  sasm->flush();
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 sasm->frame_size(),
                                                 oop_maps,
                                                 sasm->must_gc_arguments());
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}
void Runtime1::initialize(BufferBlob* blob) {
  initialize_pd();
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
}
CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}
const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}
const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JFR_TIME_FUNCTION);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
#undef FUNCTION_CASE
  return pd_name_for_address(entry);
}
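// Slow-path allocation entries called from compiled code. Each allocates
// through the regular runtime (initializing the klass where required) and
// stores the result as the thread's vm_result; the array entries also
// deoptimize the caller under DeoptimizeALot to stress the deopt paths.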
JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)
  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);
  h->initialize(CHECK);
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END
JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END
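// Compute the bci the overflowing branch jumps to (or the entry bci for
// invocation-counter overflow) and report the event to the compilation
// policy, which may return an OSR nmethod to transfer execution to.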
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(THREAD, m);
  RegisterMap map(THREAD, false);
  frame fr = THREAD->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(THREAD, nm->method());
  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(thread, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(thread, false);
      frame fr = thread->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(thread, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END
extern void vm_exit(int code);
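// Find the continuation address for an exception thrown at 'pc' in compiled
// code: adjusts pc for deoptimized frames, deoptimizes the caller when JVMTI
// wants exception events, consults the nmethod's handler cache, and otherwise
// computes the handler via SharedRuntime and caches it.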
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  thread->set_is_method_handle_return(false);
  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
  if (JvmtiExport::can_post_on_exceptions()) {
    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");
    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }
  address continuation = NULL;
  if (guard_pages_enabled) {
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ") thrown in compiled method <%s> at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT "",
                    exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
    }
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
    thread->clear_exception_oop_and_pc();
    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);
    if (continuation != NULL && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }
  thread->set_vm_result(exception());
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
  if (TraceExceptions) {
    ttyLocker ttyl;
    ResourceMark rm;
    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
                  p2i(thread), p2i(continuation), p2i(pc));
  }
  return continuation;
JRT_END
address Runtime1::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }
  assert(continuation != NULL, "no handler found");
  return continuation;
}
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  char message[jintAsStringSize];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END
JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_index_exception_count++;)
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END
JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END
JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END
JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, object->klass()->external_name());
  SharedRuntime::throw_and_post_jvmti_exception(
    thread, vmSymbols::java_lang_ClassCastException(), message);
JRT_END
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
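// Slow-path monitorenter. With biased locking, fast_enter may rebias the
// object; with stack (fast) locking the BasicObjectLock has already been
// filled in by compiled code, otherwise its obj field must be set here first.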
JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  assert(h_obj()->is_oop(), "must be NULL or an object");
  if (UseBiasedLocking) {
    ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
  } else {
    if (UseFastLocking) {
      assert(obj == lock->obj(), "must match");
      ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
    } else {
      lock->set_obj(obj);
      ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
    }
  }
JRT_END
JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  EXCEPTION_MARK;
  oop obj = lock->obj();
  assert(obj->is_oop(), "must be NULL or an object");
  if (UseFastLocking) {
    ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
  }
JRT_END
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END
static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  Bytecodes::Code code       = field_access.code();
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}
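// Slow path behind the access_field/load_klass/load_mirror/load_appendix
// patching stubs: resolve the constant the compiled code needs (field
// offset, Klass*, mirror or appendix oop) at the caller's bci, then, under
// Patching_lock, copy the real instruction bytes from the PatchingStub's
// copy buffer over the call site so later executions take the fast path.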
JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)
#ifdef AARCH64
  ShouldNotReachHere();
#endif
  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");
  methodHandle caller_method(THREAD, vfst.method());
  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);
#ifndef PRODUCT
  BasicType patch_field_type = T_ILLEGAL;
#endif // PRODUCT
  bool deoptimize_for_volatile = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
  if (stub_id == Runtime1::access_field_patching_id) {
    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(THREAD, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();
    deoptimize_for_volatile = result.access_flags().is_volatile();
#ifndef PRODUCT
    patch_field_type = result.field_type();
#endif
  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = KlassHandle(THREAD, klass);
          mirror = Handle(THREAD, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(THREAD, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = KlassHandle(THREAD, k);
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();
    CallInfo info;
    constantPoolHandle pool(thread, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    appendix = info.resolved_appendix();
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
        break;
      }
      case Bytecodes::_invokedynamic: {
        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }
  if (deoptimize_for_volatile) {
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
    }
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
  }
  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump() ) {
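        // Decode the PatchingStub's info bytes, laid down just below
        // stub_location: the length of the copied instruction sequence, the
        // distance back to the copy buffer, and the offset of the
        // being_initialized entry used while a class is still initializing.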
        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != NULL, "nmethod not found");
          OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != NULL, "null check");
          map->print();
          tty->cr();
          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass())->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass() != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass()));
            } else {
              assert(mirror() != NULL, "klass not set");
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }
            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));
          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }
#if defined(SPARC) || defined(PPC)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          nmethod* nm = CodeCache::find_nmethod(instr_pc);
          assert(nm != NULL, "invalid nmethod_pc");
          RelocIterator mds(nm, copy_buff, copy_buff + 1);
          bool found = false;
          while (mds.next() && !found) {
            if (mds.type() == relocInfo::oop_type) {
              assert(stub_id == Runtime1::load_mirror_patching_id ||
                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
              oop_Relocation* r = mds.oop_reloc();
              oop* oop_adr = r->oop_addr();
              r->fix_oop_relocation();
              found = true;
            } else if (mds.type() == relocInfo::metadata_type) {
              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
              metadata_Relocation* r = mds.metadata_reloc();
              Metadata** metadata_adr = r->metadata_addr();
              r->fix_metadata_relocation();
              found = true;
            }
          }
          assert(found, "the metadata must exist!");
        }
#endif
        if (do_patch) {
#ifdef ARM
          if((load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != NULL, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif
          // Copy the tail of the patched instruction sequence byte by byte;
          // the leading NativeCall::instruction_size bytes are replaced
          // mt-safely below.
          for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef SPARC
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }
        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }
  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
                              (appendix.not_null() && appendix->is_scavengable()))) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }
    Universe::heap()->register_nmethod(nm);
  }
JRT_END
#ifndef TARGET_ARCH_aarch64
int Runtime1::move_klass_patching(JavaThread* thread) {
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  return caller_is_deopted();
}
int Runtime1::move_mirror_patching(JavaThread* thread) {
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    ResetNoHandleMark rnhm;
    patch_code(thread, load_mirror_patching_id);
  }
  return caller_is_deopted();
}
int Runtime1::move_appendix_patching(JavaThread* thread) {
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    ResetNoHandleMark rnhm;
    patch_code(thread, load_appendix_patching_id);
  }
  return caller_is_deopted();
}
int Runtime1::access_field_patching(JavaThread* thread) {
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    ResetNoHandleMark rnhm;
    patch_code(thread, access_field_patching_id);
  }
  return caller_is_deopted();
JRT_END
#endif
JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  tty->print("%d ", block_id);
JRT_END
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0       // arraycopy succeeded
};
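// Shared helper for the slow-path object arraycopy: apply the GC
// pre-barrier, copy the oops atomically, apply the post-barrier. Copies
// between distinct arrays first require the source element type to be a
// subtype of the destination's element type.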
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (src == dst) {
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}
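// Generic slow-path arraycopy called from the C1 arraycopy stub. It performs
// all argument checks itself and returns ac_failed to signal the compiled
// caller to fall back to the general copy path.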
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
  _generic_arraycopy_cnt++;        // Slow-path oop array copy
#endif
  if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
  if (!dst->is_array() || !src->is_array()) return ac_failed;
  if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
  if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
  if (length == 0) return ac_ok;
  if (src->is_typeArray()) {
    Klass* klass_oop = src->klass();
    if (klass_oop != dst->klass()) return ac_failed;
    TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
    const int l2es = klass->log2_element_size();
    const int ihs = klass->array_header_in_bytes() / wordSize;
    char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
    char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) {
      narrowOop *src_addr  = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr  = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    } else {
      oop *src_addr  = objArrayOop(src)->obj_at_addr<oop>(src_pos);
      oop *dst_addr  = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    }
  }
  return ac_failed;
JRT_END
JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
#ifndef PRODUCT
  _primitive_arraycopy_cnt++;
#endif
  if (length == 0) return;
  Copy::conjoint_jbytes(src, dst, length);
JRT_END
JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#ifndef PRODUCT
  _oop_arraycopy_cnt++;
#endif
  if (num == 0) return;
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END
JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  assert(mirror != NULL, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;
  assert(!TieredCompilation, "incompatible with tiered compilation");
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
  nm->make_not_entrant();
  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();
  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }
  if (mdo != NULL) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }
  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee = methodHandle(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END
#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt:          %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_cnt);
  tty->print_cr(" _primitive_arraycopy_cnt:        %d", _primitive_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (C):          %d", Runtime1::_oop_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (stub):       %d", _oop_arraycopy_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);
  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);
  tty->print_cr(" _throw_range_check_exception_count:            %d:", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d:", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d:", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d:", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d:", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d:", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count:            %d:", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count:                                  %d:", _throw_count);
  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT
C:\hotspot-69087d08d473\src\share\vm/c1/c1_Runtime1.hpp
#ifndef SHARE_VM_C1_C1_RUNTIME1_HPP
#define SHARE_VM_C1_C1_RUNTIME1_HPP
#include "c1/c1_FrameMap.hpp"
#include "code/stubs.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.hpp"
#include "runtime/deoptimization.hpp"
class StubAssembler;
#define RUNTIME1_STUBS(stub, last_entry) \
  stub(dtrace_object_alloc)          \
  stub(unwind_exception)             \
  stub(forward_exception)            \
  stub(throw_range_check_failed)       /* throws ArrayIndexOutOfBoundsException */ \
  stub(throw_index_exception)          /* throws IndexOutOfBoundsException */ \
  stub(throw_div0_exception)         \
  stub(throw_null_pointer_exception) \
  stub(register_finalizer)           \
  stub(new_instance)                 \
  stub(fast_new_instance)            \
  stub(fast_new_instance_init_check) \
  stub(new_type_array)               \
  stub(new_object_array)             \
  stub(new_multi_array)              \
  stub(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
  stub(handle_exception)             \
  stub(handle_exception_from_callee) \
  stub(throw_array_store_exception)  \
  stub(throw_class_cast_exception)   \
  stub(throw_incompatible_class_change_error)   \
  stub(slow_subtype_check)           \
  stub(monitorenter)                 \
  stub(monitorenter_nofpu)             /* optimized version that does not preserve fpu registers */ \
  stub(monitorexit)                  \
  stub(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
  stub(deoptimize)                   \
  stub(access_field_patching)        \
  stub(load_klass_patching)          \
  stub(load_mirror_patching)         \
  stub(load_appendix_patching)       \
  stub(g1_pre_barrier_slow)          \
  stub(g1_post_barrier_slow)         \
  stub(fpu2long_stub)                \
  stub(counter_overflow)             \
  stub(predicate_failed_trap)        \
  last_entry(number_of_ids)
#define DECLARE_STUB_ID(x)       x ## _id ,
#define DECLARE_LAST_STUB_ID(x)  x
#define STUB_NAME(x)             #x " Runtime1 stub",
#define LAST_STUB_NAME(x)        #x " Runtime1 stub"
class Runtime1: public AllStatic {
  friend class VMStructs;
  friend class ArrayCopyStub;
 public:
  enum StubID {
    RUNTIME1_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
  };
#ifndef PRODUCT
  static int _resolve_invoke_cnt;
  static int _handle_wrong_method_cnt;
  static int _ic_miss_cnt;
  static int _generic_arraycopy_cnt;
  static int _primitive_arraycopy_cnt;
  static int _oop_arraycopy_cnt;
  static int _generic_arraycopystub_cnt;
  static int _arraycopy_slowcase_cnt;
  static int _arraycopy_checkcast_cnt;
  static int _arraycopy_checkcast_attempt_cnt;
  static int _new_type_array_slowcase_cnt;
  static int _new_object_array_slowcase_cnt;
  static int _new_instance_slowcase_cnt;
  static int _new_multi_array_slowcase_cnt;
  static int _monitorenter_slowcase_cnt;
  static int _monitorexit_slowcase_cnt;
  static int _patch_code_slowcase_cnt;
  static int _throw_range_check_exception_count;
  static int _throw_index_exception_count;
  static int _throw_div0_exception_count;
  static int _throw_null_pointer_exception_count;
  static int _throw_class_cast_exception_count;
  static int _throw_incompatible_class_change_error_count;
  static int _throw_array_store_exception_count;
  static int _throw_count;
#endif
 private:
  static CodeBlob* _blobs[number_of_ids];
  static const char* _blob_names[];
  static void       generate_blob_for(BufferBlob* blob, StubID id);
  static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
  static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
  static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
  static void       generate_unwind_exception(StubAssembler *sasm);
  static OopMapSet* generate_patching(StubAssembler* sasm, address target);
  static OopMapSet* generate_stub_call(StubAssembler* sasm, Register result, address entry,
                                       Register arg1 = noreg, Register arg2 = noreg, Register arg3 = noreg);
  static void new_instance    (JavaThread* thread, Klass* klass);
  static void new_type_array  (JavaThread* thread, Klass* klass, jint length);
  static void new_object_array(JavaThread* thread, Klass* klass, jint length);
  static void new_multi_array (JavaThread* thread, Klass* klass, int rank, jint* dims);
  static address counter_overflow(JavaThread* thread, int bci, Method* method);
  static void unimplemented_entry   (JavaThread* thread, StubID id);
  static address exception_handler_for_pc(JavaThread* thread);
  static void throw_range_check_exception(JavaThread* thread, int index);
  static void throw_index_exception(JavaThread* thread, int index);
  static void throw_div0_exception(JavaThread* thread);
  static void throw_null_pointer_exception(JavaThread* thread);
  static void throw_class_cast_exception(JavaThread* thread, oopDesc* object);
  static void throw_incompatible_class_change_error(JavaThread* thread);
  static void throw_array_store_exception(JavaThread* thread, oopDesc* object);
  static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock);
  static void monitorexit (JavaThread* thread, BasicObjectLock* lock);
  static void deoptimize(JavaThread* thread);
  static int access_field_patching(JavaThread* thread);
  static int move_klass_patching(JavaThread* thread);
  static int move_mirror_patching(JavaThread* thread);
  static int move_appendix_patching(JavaThread* thread);
  static void patch_code(JavaThread* thread, StubID stub_id);
#ifdef TARGET_ARCH_aarch64
  static void patch_code_aarch64(JavaThread* thread, StubID stub_id);
#endif
 public:
  static void initialize(BufferBlob* blob);
  static void initialize_pd();
  static CodeBlob* blob_for (StubID id);
  static address   entry_for(StubID id)          { return blob_for(id)->code_begin(); }
  static const char* name_for (StubID id);
  static const char* name_for_address(address entry);
  static const char* pd_name_for_address(address entry);
  static void trace_block_entry(jint block_id);
#ifndef PRODUCT
  static address throw_count_address()               { return (address)&_throw_count;             }
  static address arraycopy_count_address(BasicType type);
#endif
  static int  arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length);
  static void primitive_arraycopy(HeapWord* src, HeapWord* dst, int length);
  static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
  static int  is_instance_of(oopDesc* mirror, oopDesc* obj);
  static void predicate_failed_trap(JavaThread* thread);
  static void print_statistics()                 PRODUCT_RETURN;
};
#endif // SHARE_VM_C1_C1_RUNTIME1_HPP
C:\hotspot-69087d08d473\src\share\vm/c1/c1_ValueMap.cpp
#include "precompiled.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef PRODUCT
  int ValueMap::_number_of_finds = 0;
  int ValueMap::_number_of_hits = 0;
  int ValueMap::_number_of_kills = 0;
  #define TRACE_VALUE_NUMBERING(code) if (PrintValueNumbering) { code; }
#else
  #define TRACE_VALUE_NUMBERING(code)
#endif
ValueMap::ValueMap()
  : _nesting(0)
  , _entries(ValueMapInitialSize, NULL)
  , _killed_values()
  , _entry_count(0)
{
  NOT_PRODUCT(reset_statistics());
}
ValueMap::ValueMap(ValueMap* old)
  : _nesting(old->_nesting + 1)
  , _entries(old->_entries.length())
  , _killed_values()
  , _entry_count(old->_entry_count)
{
  for (int i = size() - 1; i >= 0; i--) {
    _entries.at_put(i, old->entry_at(i));
  }
  _killed_values.set_from(&old->_killed_values);
}
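// Double the table. Entries created at an outer nesting level must not be
// relinked in place (parent maps share them), so they are cloned whenever
// rehashing would change their next pointer.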
void ValueMap::increase_table_size() {
  int old_size = size();
  int new_size = old_size * 2 + 1;
  ValueMapEntryList worklist(8);
  ValueMapEntryArray new_entries(new_size, NULL);
  int new_entry_count = 0;
  TRACE_VALUE_NUMBERING(tty->print_cr("increasing table size from %d to %d", old_size, new_size));
  for (int i = old_size - 1; i >= 0; i--) {
    ValueMapEntry* entry;
    for (entry = entry_at(i); entry != NULL; entry = entry->next()) {
      if (!is_killed(entry->value())) {
        worklist.push(entry);
      }
    }
    while (!worklist.is_empty()) {
      entry = worklist.pop();
      int new_index = entry_index(entry->hash(), new_size);
      if (entry->nesting() != nesting() && new_entries.at(new_index) != entry->next()) {
        entry = new ValueMapEntry(entry->hash(), entry->value(), entry->nesting(), NULL);
      }
      entry->set_next(new_entries.at(new_index));
      new_entries.at_put(new_index, entry);
      new_entry_count++;
    }
  }
  _entries = new_entries;
  _entry_count = new_entry_count;
}
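// Look up x by hash and return the equivalent existing value if there is
// one; non-constant hits from an outer nesting level are pinned so they stay
// materialized. Otherwise x is inserted as a new entry.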
Value ValueMap::find_insert(Value x) {
  const intx hash = x->hash();
  if (hash != 0) {
    NOT_PRODUCT(_number_of_finds++);
    for (ValueMapEntry* entry = entry_at(entry_index(hash, size())); entry != NULL; entry = entry->next()) {
      if (entry->hash() == hash) {
        Value f = entry->value();
        if (!is_killed(f) && f->is_equal(x)) {
          NOT_PRODUCT(_number_of_hits++);
          TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: %s %c%d equal to %c%d  (size %d, entries %d, nesting-diff %d)", x->name(), x->type()->tchar(), x->id(), f->type()->tchar(), f->id(), size(), entry_count(), nesting() - entry->nesting()));
          if (entry->nesting() != nesting() && f->as_Constant() == NULL) {
            f->pin(Instruction::PinGlobalValueNumbering);
          }
          assert(x->type()->tag() == f->type()->tag(), "should have same type");
          return f;
        }
      }
    }
    if (entry_count() >= size_threshold()) {
      increase_table_size();
    }
    int idx = entry_index(hash, size());
    _entries.at_put(idx, new ValueMapEntry(hash, x, nesting(), entry_at(idx)));
    _entry_count++;
    TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: insert %s %c%d  (size %d, entries %d, nesting %d)", x->name(), x->type()->tchar(), x->id(), size(), entry_count(), nesting()));
  }
  return x;
}
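// The kill_* operations below share one traversal. GENERIC_KILL_VALUE walks
// every bucket, evaluates the supplied must_kill predicate per entry, marks
// matches as killed, and unlinks them where the list cells belong to the
// current nesting level (cells owned by parent maps are only marked).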
#define GENERIC_KILL_VALUE(must_kill_implementation)                                     \
  NOT_PRODUCT(_number_of_kills++);                                                       \
                                                                                         \
  for (int i = size() - 1; i >= 0; i--) {                                                \
    ValueMapEntry* prev_entry = NULL;                                                    \
    for (ValueMapEntry* entry = entry_at(i); entry != NULL; entry = entry->next()) {     \
      Value value = entry->value();                                                      \
                                                                                         \
      must_kill_implementation(must_kill, entry, value)                                  \
                                                                                         \
      if (must_kill) {                                                                   \
        kill_value(value);                                                               \
                                                                                         \
        if (prev_entry == NULL) {                                                        \
          _entries.at_put(i, entry->next());                                             \
          _entry_count--;                                                                \
        } else if (prev_entry->nesting() == nesting()) {                                 \
          prev_entry->set_next(entry->next());                                           \
          _entry_count--;                                                                \
        } else {                                                                         \
          prev_entry = entry;                                                            \
        }                                                                                \
                                                                                         \
        TRACE_VALUE_NUMBERING(tty->print_cr("Value Numbering: killed %s %c%d  (size %d, entries %d, nesting-diff %d)", value->name(), value->type()->tchar(), value->id(), size(), entry_count(), nesting() - entry->nesting()));   \
      } else {                                                                           \
        prev_entry = entry;                                                              \
      }                                                                                  \
    }                                                                                    \
  }
#define MUST_KILL_MEMORY(must_kill, entry, value)                                        \
  bool must_kill = value->as_LoadField() != NULL || value->as_LoadIndexed() != NULL;
#define MUST_KILL_ARRAY(must_kill, entry, value)                                         \
  bool must_kill = value->as_LoadIndexed() != NULL                                       \
                   && value->type()->tag() == type->tag();
#define MUST_KILL_FIELD(must_kill, entry, value)                                         \
  LoadField* lf = value->as_LoadField();                                                 \
  bool must_kill = lf != NULL                                                            \
                   && lf->field()->holder() == field->holder()                           \
                   && (all_offsets || lf->field()->offset() == field->offset());
void ValueMap::kill_memory() {
  GENERIC_KILL_VALUE(MUST_KILL_MEMORY);
}
void ValueMap::kill_array(ValueType* type) {
  GENERIC_KILL_VALUE(MUST_KILL_ARRAY);
}
void ValueMap::kill_field(ciField* field, bool all_offsets) {
  GENERIC_KILL_VALUE(MUST_KILL_FIELD);
}
void ValueMap::kill_map(ValueMap* map) {
  assert(is_global_value_numbering(), "only for global value numbering");
  _killed_values.set_union(&map->_killed_values);
}
void ValueMap::kill_all() {
  assert(is_local_value_numbering(), "only for local value numbering");
  for (int i = size() - 1; i >= 0; i--) {
    _entries.at_put(i, NULL);
  }
  _entry_count = 0;
}
#ifndef PRODUCT
void ValueMap::print() {
  tty->print_cr("(size %d, entries %d, nesting %d)", size(), entry_count(), nesting());
  int entries = 0;
  for (int i = 0; i < size(); i++) {
    if (entry_at(i) != NULL) {
      tty->print("  %2d: ", i);
      for (ValueMapEntry* entry = entry_at(i); entry != NULL; entry = entry->next()) {
        Value value = entry->value();
        tty->print("%s %c%d (%s%d) -> ", value->name(), value->type()->tchar(), value->id(), is_killed(value) ? "x" : "", entry->nesting());
        entries++;
      }
      tty->print_cr("NULL");
    }
  }
  _killed_values.print();
  assert(entry_count() == entries, "entry_count incorrect");
}
void ValueMap::reset_statistics() {
  _number_of_finds = 0;
  _number_of_hits = 0;
  _number_of_kills = 0;
}
void ValueMap::print_statistics() {
  float hit_rate = 0;
  if (_number_of_finds != 0) {
    hit_rate = (float)_number_of_hits / _number_of_finds;
  }
  tty->print_cr("finds:%3d  hits:%3d   kills:%3d  hit rate: %1.4f", _number_of_finds, _number_of_hits, _number_of_kills, hit_rate);
}
#endif
class ShortLoopOptimizer : public ValueNumberingVisitor {
 private:
  GlobalValueNumbering* _gvn;
  BlockList             _loop_blocks;
  bool                  _too_complicated_loop;
  bool                  _has_field_store[T_ARRAY + 1];
  bool                  _has_indexed_store[T_ARRAY + 1];
  ValueMap* current_map()                        { return _gvn->current_map(); }
  ValueMap* value_map_of(BlockBegin* block)      { return _gvn->value_map_of(block); }
  void      kill_memory()                                 { _too_complicated_loop = true; }
  void      kill_field(ciField* field, bool all_offsets)  {
    current_map()->kill_field(field, all_offsets);
    assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
    _has_field_store[field->type()->basic_type()] = true;
  }
  void      kill_array(ValueType* type)                   {
    current_map()->kill_array(type);
    BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
    _has_indexed_store[basic_type] = true;
  }
 public:
  ShortLoopOptimizer(GlobalValueNumbering* gvn)
    : _gvn(gvn)
    , _loop_blocks(ValueMapMaxLoopSize)
    , _too_complicated_loop(false)
  {
    for (int i = 0; i <= T_ARRAY; i++) {
      _has_field_store[i] = false;
      _has_indexed_store[i] = false;
    }
  }
  bool has_field_store(BasicType type) {
    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
    return _has_field_store[type];
  }
  bool has_indexed_store(BasicType type) {
    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
    return _has_indexed_store[type];
  }
  bool process(BlockBegin* loop_header);
};
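// Hoists loop-invariant instructions out of the short loops collected by
// ShortLoopOptimizer, inserting them at the end of the loop header's
// dominator block.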
class LoopInvariantCodeMotion : public StackObj  {
 private:
  GlobalValueNumbering* _gvn;
  ShortLoopOptimizer*   _short_loop_optimizer;
  Instruction*          _insertion_point;
  ValueStack *          _state;
  bool                  _insert_is_pred;
  void set_invariant(Value v) const    { _gvn->set_processed(v); }
  bool is_invariant(Value v) const     { return _gvn->is_processed(v); }
  void process_block(BlockBegin* block);
 public:
  LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks);
};
LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks)
  : _gvn(gvn), _short_loop_optimizer(slo), _insertion_point(NULL), _state(NULL), _insert_is_pred(false) {
  TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
  TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id()));
  BlockBegin* insertion_block = loop_header->dominator();
  if (insertion_block->number_of_preds() == 0) {
    return;  // only the entry block does not have a predecessor
  }
  assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
  _insertion_point = insertion_block->end()->prev();
  _insert_is_pred = loop_header->is_predecessor(insertion_block);
  BlockEnd *block_end = insertion_block->end();
  _state = block_end->state_before();
  if (!_state) {
    assert(block_end->as_Goto(), "Block has to be goto");
    _state = block_end->state();
  }
  assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
  process_block(loop_header);
  for (int i = loop_blocks->length() - 1; i >= 1; i--) {
    process_block(loop_blocks->at(i));
  }
}