

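  // Size computation for GraphKit::new_array: derive the element-size
  // shift from the layout helper.  When the layout is a compile-time
  // constant, use log2(element size) directly; otherwise use the layout
  // value itself as the shift count (the log2 size sits in the low bits,
  // and LShiftX masks the shift count, so the high bits are harmless).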
  Node* elem_shift = NULL;
  if (layout_is_con) {
    int eshift = Klass::layout_helper_log2_element_size(layout_con);
    if (eshift != 0)
      elem_shift = intcon(eshift);
  } else {
    assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
    elem_shift = layout_val;
  }
  Node* lengthx = ConvI2X(length);
  Node* headerx = ConvI2X(header_size);
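  // 64-bit only: if the length's type admits negative values, a plain
  // sign-extending ConvI2L could yield a huge positive byte size before
  // the slow-path test fires.  Split control on initial_slow_test and
  // feed the fast path through a ConvI2L constrained to
  // [0, fast_size_limit] so the size arithmetic cannot overflow.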
#ifdef _LP64
  { const TypeInt* tilen = _gvn.find_int_type(length);
    if (tilen != NULL && tilen->_lo < 0) {
      jlong size_max = fast_size_limit;
      if (size_max > tilen->_hi)  size_max = tilen->_hi;
      const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
      IfNode* iff = new (C) IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
      _gvn.transform(iff);
      RegionNode* region = new (C) RegionNode(3);
      _gvn.set_type(region, Type::CONTROL);
      lengthx = new (C) PhiNode(region, TypeLong::LONG);
      _gvn.set_type(lengthx, TypeLong::LONG);
      Node* passed = IfFalse(iff);
      region->init_req(1, passed);
      lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
      region->init_req(2, IfTrue(iff));
      lengthx->init_req(2, ConvI2X(length));
      set_control(region);
      record_for_igvn(region);
      record_for_igvn(lengthx);
    }
  }
#endif
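  // Combine header and body sizes, then align the total by masking off
  // the low round_mask bits.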
  Node* abody = lengthx;
  if (elem_shift != NULL)
    abody     = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) );
  Node* size  = _gvn.transform( new(C) AddXNode(headerx, abody) );
  if (round_mask != 0) {
    Node* mask = MakeConX(~round_mask);
    size       = _gvn.transform( new(C) AndXNode(size, mask) );
  }
  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state
  if (initial_slow_test->is_Bool()) {
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }
  AllocateArrayNode* alloc
    = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                                control(), mem, i_o(),
                                size, klass_node,
                                initial_slow_test,
                                length);
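  // Tighten the returned oop type with the statically known length
  // range so downstream array accesses see the bounds.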
  const TypeInt* length_type = _gvn.find_int_type(length);
  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
  if (ary_type->isa_aryptr() && length_type != NULL) {
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }
  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
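  // If the length value is still live in the JVM state, replace it with
  // a cast pinned to this allocation so its type reflects the actual
  // array length (this helps later range-check elimination).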
  if (map()->find_edge(length) >= 0) {
    Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
    if (ccast != length) {
      _gvn.set_type_bottom(ccast);
      record_for_igvn(ccast);
      replace_in_map(length, ccast);
    }
  }
  return javaoop;
}
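// Given a pointer produced by an allocation (possibly behind a single
// raw-to-oop CheckCastPP), return the producing AllocateNode, or NULL.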
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) {     // reduce dumb test in callers
    return NULL;
  }
  if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
    ptr = ptr->in(1);
    if (ptr == NULL) return NULL;
  }
  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  return NULL;
}
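// As above, but for a derived pointer: strip the AddP chain to recover
// the base and its constant offset, then look up the allocation.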
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                             intptr_t& offset) {
  Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
  if (base == NULL)  return NULL;
  return Ideal_allocation(base, phase);
}
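// Trace this InitializeNode's RawAddress input back to the AllocateNode
// whose raw-oop projection feeds it, if any.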
AllocateNode* InitializeNode::allocation() {
  Node* rawoop = in(InitializeNode::RawAddress);
  if (rawoop->is_Proj()) {
    Node* alloc = rawoop->in(0);
    if (alloc->is_Allocate()) {
      return alloc->as_Allocate();
    }
  }
  return NULL;
}
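// Inverse of InitializeNode::allocation(): find the InitializeNode
// attached to this allocation's raw-address projection.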
InitializeNode* AllocateNode::initialization() {
  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  if (rawoop == NULL)  return NULL;
  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
    Node* init = rawoop->fast_out(i);
    if (init->is_Initialize()) {
      assert(init->as_Initialize()->allocation() == this, "2-way link");
      return init->as_Initialize();
    }
  }
  return NULL;
}
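// Plant a predicate placeholder at the current control point: an
// always-taken If whose condition is hidden behind an Opaque1 node.
// Loop predication later recognizes these placeholders (registered via
// add_predicate_opaq) and hoists loop-invariant checks above them; the
// untaken branch deoptimizes with Action_maybe_recompile.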
void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
  if (too_many_traps(reason)) {
#ifdef ASSERT
    if (TraceLoopPredicate) {
      int tc = C->trap_count(reason);
      tty->print("too many traps=%s tcount=%d in ",
                    Deoptimization::trap_reason_name(reason), tc);
      method()->print(); // which method has too many predicate traps
      tty->cr();
    }
#endif
    return;
  }
  Node *cont    = _gvn.intcon(1);
  Node* opq     = _gvn.transform(new (C) Opaque1Node(C, cont));
  Node *bol     = _gvn.transform(new (C) Conv2BNode(opq));
  IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
  Node* iffalse = _gvn.transform(new (C) IfFalseNode(iff));
  C->add_predicate_opaq(opq);
  {
    PreserveJVMState pjvms(this);
    set_control(iffalse);
    inc_sp(nargs);
    uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
  }
  Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff));
  set_control(iftrue);
}
void GraphKit::add_predicate(int nargs) {
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  if (LoopLimitCheck) {
    add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
  }
}
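// IdealKit interop: pull the IdealKit's memory, i/o and control state
// back into this GraphKit after a stretch of ideal-style construction.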
#define __ ideal.
void GraphKit::sync_kit(IdealKit& ideal) {
  set_all_memory(__ merged_memory());
  set_i_o(__ i_o());
  set_control(__ ctrl());
}
void GraphKit::final_sync(IdealKit& ideal) {
  sync_kit(ideal);
}
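// Card-table post-barrier for the non-G1 collectors: mark the card
// covering the updated address (or the object base, when precise
// marking is not required) as dirty.  Stores of NULL and stores into
// just-allocated objects (under ReduceInitialCardMarks) need no mark.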
void GraphKit::write_barrier_post(Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint  adr_idx,
                                  Node* val,
                                  bool use_precise) {
  if (val != NULL && val->is_Con()) {
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      return;
  }
  if (use_ReduceInitialCardMarks()
      && obj == just_allocated_object(control())) {
    return;
  }
  if (!use_precise) {
    adr = obj;
  }
  assert(adr != NULL, "card mark target must not be NULL");
  IdealKit ideal(this, true);
  Node* cast = __ CastPX(__ ctrl(), adr);
  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
         "Only one we handle so far.");
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
  int adr_type = Compile::AliasIdxRaw;
  Node*   zero = __ ConI(0); // Dirty card value
  BasicType bt = T_BYTE;
  if (UseCondCardMark) {
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }
  if( !UseConcMarkSweepGC ) {
#if defined(AARCH64)
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
#else
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
#endif
  } else {
    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  }
  if (UseCondCardMark) {
    __ end_if();
  }
  final_sync(ideal);
}
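// G1 SATB pre-barrier: while concurrent marking is active, log the
// field's previous value into the thread-local SATB queue so the marker
// treats it as live; when the queue buffer is full, fall back to the
// g1_wb_pre runtime leaf call.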
void GraphKit::g1_write_barrier_pre(bool do_load,
                                    Node* obj,
                                    Node* adr,
                                    uint alias_idx,
                                    Node* val,
                                    const TypeOopPtr* val_type,
                                    Node* pre_val,
                                    BasicType bt) {
  if (do_load) {
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");
  } else {
    assert(pre_val != NULL, "must be loaded already");
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");
  IdealKit ideal(this, true);
  Node* tls = __ thread(); // ThreadLocalStorage
  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);
  BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
  const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
                                          PtrQueue::byte_offset_of_active());
  const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
                                          PtrQueue::byte_offset_of_index());
  const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
                                          PtrQueue::byte_offset_of_buf());
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
    if (do_load) {
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }
    __ if_then(pre_val, BoolTest::ne, null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
      __ if_then(index, BoolTest::ne, zeroX, likely); {
        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
      } __ else_(); {
        const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if();  // (!index)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (!marking)
  final_sync(ideal);
}
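// Dirty the card and enqueue its address on the thread-local dirty-card
// queue; when the buffer is full, call the g1_wb_post runtime leaf
// instead.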
void GraphKit::g1_mark_card(IdealKit& ideal,
                            Node* card_adr,
                            Node* oop_store,
                            uint oop_alias_idx,
                            Node* index,
                            Node* index_adr,
                            Node* buffer,
                            const TypeFunc* tf) {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
  __ if_then(index, BoolTest::ne, zeroX); {
    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();
}
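// G1 post-barrier: a card mark is needed only when the store creates a
// cross-region reference to a non-NULL value.  Filter on
// (adr ^ val) >> LogOfHRGrainBytes, skip cards already marked young,
// and re-check for dirty after a StoreLoad barrier before enqueueing.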
void GraphKit::g1_write_barrier_post(Node* oop_store,
                                     Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     BasicType bt,
                                     bool use_precise) {
  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    return;
  }
  if (!use_precise) {
    adr = obj;
  }
  assert(adr != NULL, "card mark target must not be NULL");
  IdealKit ideal(this, true);
  Node* tls = __ thread(); // ThreadLocalStorage
  Node* no_base = __ top();
  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);
  const TypePtr* card_type = TypeRawPtr::BOTTOM;
  const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
  const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_index());
  const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_buf());
  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
  Node* cast =  __ CastPX(__ ctrl(), adr);
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
  if (val != NULL) {
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
    __ if_then(xor_res, BoolTest::ne, zeroX); {
      __ if_then(val, BoolTest::ne, null(), unlikely); {
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
        __ if_then(card_val, BoolTest::ne, young_card); {
          sync_kit(ideal);
          insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(this);
          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
  }
  final_sync(ideal);
}
#undef __
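// java.lang.String field accessors.  Depending on the JDK's String
// layout, the offset and count fields may be absent; in that case the
// offset is the constant 0 and the length comes from the value array.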
Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
  if (java_lang_String::has_offset_field()) {
    int offset_offset = java_lang_String::offset_offset_in_bytes();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                       false, NULL, 0);
    const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
    int offset_field_idx = C->get_alias_index(offset_field_type);
    return make_load(ctrl,
                     basic_plus_adr(str, str, offset_offset),
                     TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
  } else {
    return intcon(0);
  }
}
Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  if (java_lang_String::has_count_field()) {
    int count_offset = java_lang_String::count_offset_in_bytes();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                       false, NULL, 0);
    const TypePtr* count_field_type = string_type->add_offset(count_offset);
    int count_field_idx = C->get_alias_index(count_field_type);
    return make_load(ctrl,
                     basic_plus_adr(str, str, count_offset),
                     TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
  } else {
    return load_array_length(load_String_value(ctrl, str));
  }
}
Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}
void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
  int offset_offset = java_lang_String::offset_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
  int offset_field_idx = C->get_alias_index(offset_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
                  value, T_INT, offset_field_idx, MemNode::unordered);
}
void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  store_oop_to_object(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
      value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
}
void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
  int count_offset = java_lang_String::count_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* count_field_type = string_type->add_offset(count_offset);
  int count_field_idx = C->get_alias_index(count_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, count_offset),
                  value, T_INT, count_field_idx, MemNode::unordered);
}
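// Mark a loaded value array as stable so its elements may be treated as
// constants once the array itself is constant.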
Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
  return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
}
// C:\hotspot-69087d08d473\src\share\vm/opto/graphKit.hpp
#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP
#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;
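// GraphKit is a toolkit for building the common sorts of subgraphs used
// during parsing and intrinsic expansion.  It carries the current JVM
// state (map, stack pointer, bci) and provides factory helpers that
// transform new nodes through GVN as they are created.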
class GraphKit : public Phase {
  friend class PreserveJVMState;
 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method
 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
 private:
  SafePointNode*     map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }
 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate
#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif
  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  ciEnv*        env()           const { return _env; }
  PhaseGVN&     gvn()           const { return _gvn; }
  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  Node*         null()          const { return zerocon(T_OBJECT); }
  Node*         top()           const { return C->top(); }
  RootNode*     root()          const { return C->root(); }
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  Node* byte_map_base_node() {
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }
  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }
  void set_jvms(JVMState* jvms)       { set_map(jvms->map());
                                        assert(jvms == this->jvms(), "sanity");
                                        _sp = jvms->sp();
                                        _bci = jvms->bci();
                                        _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)                 { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top
  void inc_sp(int i)                  { set_sp(sp() + i); }
  void dec_sp(int i)                  { set_sp(sp() - i); }
  void set_bci(int bci)               { _bci = bci; }
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();
#ifdef ASSERT
  bool jvms_in_sync() const;
  void verify_map() const;
  static void verify_exception_state(SafePointNode* ex_map);
#endif
  SafePointNode* clone_map();
  void set_map_clone(SafePointNode* m);
  bool failing() const { return C->failing(); }
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
  void stop_and_kill_map();
  bool stopped();
  bool has_ex_handler();
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);
  static Node* saved_ex_oop(SafePointNode* ex_map);
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);
#ifdef ASSERT
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }
  SafePointNode* make_exception_state(Node* ex_oop);
  void add_exception_state(SafePointNode* ex_map);
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
  Node* use_exception_state(SafePointNode* ex_map);
  void add_exception_states_from(JVMState* jvms);
  JVMState* transfer_exceptions_into_jvms();
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw) ;
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);
  bool compute_stack_effects(int& inputs, int& depth);
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C) IfFalseNode(iff));     }
  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C) DivINode(ctl, l, r));  }
  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C) XorINode(l, r));       }
  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C) MinINode(l, r));       }
  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C) URShiftINode(l, r));   }
  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }
  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C) AddPNode(b, a, o));    }
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  Node* load_object_klass(Node* object);
  Node* load_array_length(Node* array);
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_LONG);
  }
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false, bool safe_for_replace = false);
  bool seems_never_null(Node* obj, ciProfileData* data);
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
  Node* record_profiled_receiver_for_speculation(Node* n);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  void replace_in_map(Node* old, Node* neww);
  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++        , n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp             ); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1   ); }
  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    Node* halfword = pop();
    assert(halfword == top(), "");
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    set_local(i+0, lval);
    set_local(i+1, top());
  }
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }
  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }
  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }
  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }
  Node* immutable_memory() { return C->immutable_memory(); }
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
  Node* reset_memory();
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }
  void set_all_memory(Node* newmem);
  void set_all_memory_call(Node* call, bool separate_io_proj = false);
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false);
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false) {
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched);
  }
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false);
  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise,
                  MemNode::MemOrd mo,
                  bool mismatched = false);
  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt,
                            MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
  }
  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
  }
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo,
                             bool mismatched = false);
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);
  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              const TypeInt* sizetype = NULL,
                              Node* ctrl = NULL);
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }
  void  set_arguments_for_java_call(CallJavaNode* call);
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }
  virtual int reexecute_sp() { return sp(); }
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }
  Node* just_allocated_object(Node* current_control);
  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);
  void write_barrier_post(Node *store, Node* obj,
                          Node* adr,  uint adr_idx, Node* val, bool use_precise);
  bool can_move_pre_barrier() const;
  void g1_write_barrier_pre(bool do_load,
                            Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            Node* pre_val,
                            BasicType bt);
  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);
  public:
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);
  Node* precision_rounding(Node* n);
  Node* dprecision_rounding(Node* n);
  Node* dstore_rounding(Node* n);
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc* slow_call_type,
                      Node* slow_arg, Klass* ex_klass, Node* slow_result);
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );
  Node* gen_subtype_check(Node* subklass, Node* superklass);
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception=false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);
  Node* load_String_offset(Node* ctrl, Node* str);
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  void store_String_offset(Node* ctrl, Node* str, Node* value);
  void store_String_length(Node* ctrl, Node* str, Node* value);
  void store_String_value(Node* ctrl, Node* str, Node* value);
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }
  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.transform(iff);                           // Value may be known at parse-time
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};
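// RAII helper: saves the kit's map and stack pointer on construction
// and restores them on destruction, so a subgraph can be built without
// disturbing the caller's JVM state.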
class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;
 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};
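// Builds an If with an anticipated branch and leaves the unanticipated
// branch in the kit's map for the caller to finish.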
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
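// Saves and restores the kit's stack pointer and reexecute state across
// a region that temporarily changes them.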
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;
 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};
#endif // SHARE_VM_OPTO_GRAPHKIT_HPP
// C:\hotspot-69087d08d473\src\share\vm/opto/idealGraphPrinter.cpp
#include "precompiled.hpp"
#include "opto/chaitin.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/machnode.hpp"
#include "opto/parse.hpp"
#include "runtime/threadCritical.hpp"
#ifndef PRODUCT
const char *IdealGraphPrinter::INDENT = "  ";
const char *IdealGraphPrinter::TOP_ELEMENT = "graphDocument";
const char *IdealGraphPrinter::GROUP_ELEMENT = "group";
const char *IdealGraphPrinter::GRAPH_ELEMENT = "graph";
const char *IdealGraphPrinter::PROPERTIES_ELEMENT = "properties";
const char *IdealGraphPrinter::EDGES_ELEMENT = "edges";
const char *IdealGraphPrinter::PROPERTY_ELEMENT = "p";
const char *IdealGraphPrinter::EDGE_ELEMENT = "edge";
const char *IdealGraphPrinter::NODE_ELEMENT = "node";
const char *IdealGraphPrinter::NODES_ELEMENT = "nodes";
const char *IdealGraphPrinter::REMOVE_EDGE_ELEMENT = "removeEdge";
const char *IdealGraphPrinter::REMOVE_NODE_ELEMENT = "removeNode";
const char *IdealGraphPrinter::METHOD_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::METHOD_IS_PUBLIC_PROPERTY = "public";
const char *IdealGraphPrinter::METHOD_IS_STATIC_PROPERTY = "static";
const char *IdealGraphPrinter::TRUE_VALUE = "true";
const char *IdealGraphPrinter::NODE_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::EDGE_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::NODE_ID_PROPERTY = "id";
const char *IdealGraphPrinter::FROM_PROPERTY = "from";
const char *IdealGraphPrinter::TO_PROPERTY = "to";
const char *IdealGraphPrinter::PROPERTY_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::GRAPH_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::INDEX_PROPERTY = "index";
const char *IdealGraphPrinter::METHOD_ELEMENT = "method";
const char *IdealGraphPrinter::INLINE_ELEMENT = "inline";
const char *IdealGraphPrinter::BYTECODES_ELEMENT = "bytecodes";
const char *IdealGraphPrinter::METHOD_BCI_PROPERTY = "bci";
const char *IdealGraphPrinter::METHOD_SHORT_NAME_PROPERTY = "shortName";
const char *IdealGraphPrinter::CONTROL_FLOW_ELEMENT = "controlFlow";
const char *IdealGraphPrinter::BLOCK_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::BLOCK_DOMINATOR_PROPERTY = "dom";
const char *IdealGraphPrinter::BLOCK_ELEMENT = "block";
const char *IdealGraphPrinter::SUCCESSORS_ELEMENT = "successors";
const char *IdealGraphPrinter::SUCCESSOR_ELEMENT = "successor";
const char *IdealGraphPrinter::ASSEMBLY_ELEMENT = "assembly";
int IdealGraphPrinter::_file_count = 0;
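// Return the per-compiler-thread printer, creating it lazily; printing
// is enabled only when PrintIdealGraphLevel is non-zero.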
IdealGraphPrinter *IdealGraphPrinter::printer() {
  if (PrintIdealGraphLevel == 0) return NULL;
  JavaThread *thread = JavaThread::current();
  if (!thread->is_Compiler_thread()) return NULL;
  CompilerThread *compiler_thread = (CompilerThread *)thread;
  if (compiler_thread->ideal_graph_printer() == NULL) {
    IdealGraphPrinter *printer = new IdealGraphPrinter();
    compiler_thread->set_ideal_graph_printer(printer);
  }
  return compiler_thread->ideal_graph_printer();
}
void IdealGraphPrinter::clean_up() {
  JavaThread *p;
  for (p = Threads::first(); p; p = p->next()) {
    if (p->is_Compiler_thread()) {
      CompilerThread *c = (CompilerThread *)p;
      IdealGraphPrinter *printer = c->ideal_graph_printer();
      if (printer) {
        delete printer;
      }
      c->set_ideal_graph_printer(NULL);
    }
  }
}
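// The printer writes either to a file (PrintIdealGraphFile, suffixed
// with a counter for subsequent instances) or to a network socket
// connected to the visualizer, which must acknowledge with 'y'.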
IdealGraphPrinter::IdealGraphPrinter() {
  _traverse_outs = true;
  _should_send_method = true;
  _output = NULL;
  buffer[0] = 0;
  _depth = 0;
  _current_method = NULL;
  _stream = NULL;
  if (PrintIdealGraphFile != NULL) {
    ThreadCritical tc;
    if (_file_count != 0) {
      ResourceMark rm;
      stringStream st;
      const char* dot = strrchr(PrintIdealGraphFile, '.');
      if (dot) {
        st.write(PrintIdealGraphFile, dot - PrintIdealGraphFile);
        st.print("%d%s", _file_count, dot);
      } else {
        st.print("%s%d", PrintIdealGraphFile, _file_count);
      }
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(st.as_string());
      _output = stream;
    } else {
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(PrintIdealGraphFile);
      _output = stream;
    }
    _file_count++;
  } else {
    _stream = new (ResourceObj::C_HEAP, mtCompiler) networkStream();
    if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) {
      char c = 0;
      _stream->read(&c, 1);
      if (c != 'y') {
        tty->print_cr("Client available, but does not want to receive data!");
        _stream->close();
        delete _stream;
        _stream = NULL;
        return;
      }
      _output = _stream;
    } else {
      fatal(err_msg_res("Couldn't connect to visualizer at %s:" INTX_FORMAT,
                        PrintIdealGraphAddress, PrintIdealGraphPort));
    }
  }
  _xml = new (ResourceObj::C_HEAP, mtCompiler) xmlStream(_output);
  head(TOP_ELEMENT);
}
IdealGraphPrinter::~IdealGraphPrinter() {
  tail(TOP_ELEMENT);
  if(_xml) {
    delete _xml;
    _xml = NULL;
  }
  if (_stream) {
    delete _stream;
    if (_stream == _output) {
      _output = NULL;
    }
    _stream = NULL;
  }
  if (_output) {
    delete _output;
    _output = NULL;
  }
}
void IdealGraphPrinter::begin_elem(const char *s) {
  _xml->begin_elem("%s", s);
}
void IdealGraphPrinter::end_elem() {
  _xml->end_elem();
}
void IdealGraphPrinter::begin_head(const char *s) {
  _xml->begin_head("%s", s);
}
void IdealGraphPrinter::end_head() {
  _xml->end_head();
}
void IdealGraphPrinter::print_attr(const char *name, intptr_t val) {
  stringStream stream;
  stream.print(INTX_FORMAT, val);
  print_attr(name, stream.as_string());
}
void IdealGraphPrinter::print_attr(const char *name, const char *val) {
  _xml->print(" %s='", name);
  text(val);
  _xml->print("'");
}
void IdealGraphPrinter::head(const char *name) {
  _xml->head("%s", name);
}
void IdealGraphPrinter::tail(const char *name) {
  _xml->tail(name);
}
void IdealGraphPrinter::text(const char *s) {
  _xml->text("%s", s);
}
void IdealGraphPrinter::print_prop(const char *name, int val) {
  stringStream stream;
  stream.print("%d", val);
  print_prop(name, stream.as_string());
}
void IdealGraphPrinter::print_prop(const char *name, const char *val) {
  begin_head(PROPERTY_ELEMENT);
  print_attr(PROPERTY_NAME_PROPERTY, name);
  end_head();
  text(val);
  tail(PROPERTY_ELEMENT);
}
void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree) {
  begin_head(METHOD_ELEMENT);
  stringStream str;
  method->print_name(&str);
  stringStream shortStr;
  method->print_short_name(&shortStr);
  print_attr(METHOD_NAME_PROPERTY, str.as_string());
  print_attr(METHOD_SHORT_NAME_PROPERTY, shortStr.as_string());
  print_attr(METHOD_BCI_PROPERTY, bci);
  end_head();
  head(BYTECODES_ELEMENT);
  output()->print_cr("<![CDATA[");
  method->print_codes_on(output());
  output()->print_cr("]]>");
  tail(BYTECODES_ELEMENT);
  head(INLINE_ELEMENT);
  if (tree != NULL) {
    GrowableArray<InlineTree *> subtrees = tree->subtrees();
    for (int i = 0; i < subtrees.length(); i++) {
      print_inline_tree(subtrees.at(i));
    }
  }
  tail(INLINE_ELEMENT);
  tail(METHOD_ELEMENT);
  output()->flush();
}
void IdealGraphPrinter::print_inline_tree(InlineTree *tree) {
  if (tree == NULL) return;
  print_method(tree->method(), tree->caller_bci(), tree);
}
void IdealGraphPrinter::print_inlining(Compile* compile) {
  if (_should_send_method) {
    InlineTree *inlineTree = compile->ilt();
    if (inlineTree != NULL) {
      print_inline_tree(inlineTree);
    }
  }
}
void IdealGraphPrinter::begin_method(Compile* compile) {
  ciMethod *method = compile->method();
  assert(_output, "output stream must exist!");
  assert(method, "null methods are not allowed!");
  assert(!_current_method, "current method must be null!");
  head(GROUP_ELEMENT);
  head(PROPERTIES_ELEMENT);
  stringStream strStream;
  method->print_name(&strStream);
  print_prop(METHOD_NAME_PROPERTY, strStream.as_string());
  if (method->flags().is_public()) {
    print_prop(METHOD_IS_PUBLIC_PROPERTY, TRUE_VALUE);
  }
  if (method->flags().is_static()) {
    print_prop(METHOD_IS_STATIC_PROPERTY, TRUE_VALUE);
  }
  tail(PROPERTIES_ELEMENT);
  if (_stream) {
    char answer = 0;
    _xml->flush();
    _stream->read(&answer, 1);
    _should_send_method = (answer == 'y');
  }
  this->_current_method = method;
  _xml->flush();
}
void IdealGraphPrinter::end_method() {
  tail(GROUP_ELEMENT);
  _current_method = NULL;
  _xml->flush();
}
void IdealGraphPrinter::print_indent() {
  tty->print_cr("printing indent %d", _depth);
  for (int i = 0; i < _depth; i++) {
    _xml->print("%s", INDENT);
  }
}
bool IdealGraphPrinter::traverse_outs() {
  return _traverse_outs;
}
void IdealGraphPrinter::set_traverse_outs(bool b) {
  _traverse_outs = b;
}
intptr_t IdealGraphPrinter::get_node_id(Node *n) {
  return (intptr_t)(n);
}
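// Visits a single node. The graph walk is done twice: once with
// edges == false to emit the <node> elements and their properties, and
// once with edges == true to emit one <edge> element per non-NULL input.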
void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
  if (edges) {
    intptr_t dest_id = get_node_id(n);
    for ( uint i = 0; i < n->len(); i++ ) {
      if ( n->in(i) ) {
        Node *source = n->in(i);
        begin_elem(EDGE_ELEMENT);
        intptr_t source_id = get_node_id(source);
        print_attr(FROM_PROPERTY, source_id);
        print_attr(TO_PROPERTY, dest_id);
        print_attr(INDEX_PROPERTY, i);
        end_elem();
      }
    }
  } else {
    begin_head(NODE_ELEMENT);
    print_attr(NODE_ID_PROPERTY, get_node_id(n));
    end_head();
    head(PROPERTIES_ELEMENT);
    Node *node = n;
#ifndef PRODUCT
    Compile::current()->_in_dump_cnt++;
    print_prop(NODE_NAME_PROPERTY, (const char *)node->Name());
    const Type *t = node->bottom_type();
    print_prop("type", t->msg());
    print_prop("idx", node->_idx);
#ifdef ASSERT
    print_prop("debug_idx", node->_debug_idx);
#endif
    if (C->cfg() != NULL) {
      Block* block = C->cfg()->get_block_for_node(node);
      if (block == NULL) {
        print_prop("block", C->cfg()->get_block(0)->_pre_order);
      } else {
        print_prop("block", block->_pre_order);
      }
    }
    const jushort flags = node->flags();
    if (flags & Node::Flag_is_Copy) {
      print_prop("is_copy", "true");
    }
    if (flags & Node::Flag_rematerialize) {
      print_prop("rematerialize", "true");
    }
    if (flags & Node::Flag_needs_anti_dependence_check) {
      print_prop("needs_anti_dependence_check", "true");
    }
    if (flags & Node::Flag_is_macro) {
      print_prop("is_macro", "true");
    }
    if (flags & Node::Flag_is_Con) {
      print_prop("is_con", "true");
    }
    if (flags & Node::Flag_is_cisc_alternate) {
      print_prop("is_cisc_alternate", "true");
    }
    if (flags & Node::Flag_is_dead_loop_safe) {
      print_prop("is_dead_loop_safe", "true");
    }
    if (flags & Node::Flag_may_be_short_branch) {
      print_prop("may_be_short_branch", "true");
    }
    if (flags & Node::Flag_has_call) {
      print_prop("has_call", "true");
    }
    if (C->matcher() != NULL) {
      if (C->matcher()->is_shared(node)) {
        print_prop("is_shared", "true");
      } else {
        print_prop("is_shared", "false");
      }
      if (C->matcher()->is_dontcare(node)) {
        print_prop("is_dontcare", "true");
      } else {
        print_prop("is_dontcare", "false");
      }
#ifdef ASSERT
      Node* old = C->matcher()->find_old_node(node);
      if (old != NULL) {
        print_prop("old_node_idx", old->_idx);
      }
#endif
    }
    if (node->is_Proj()) {
      print_prop("con", (int)node->as_Proj()->_con);
    }
    if (node->is_Mach()) {
      print_prop("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()]);
    }
    buffer[0] = 0;
    stringStream s2(buffer, sizeof(buffer) - 1);
    node->dump_spec(&s2);
    if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
      const TypeInstPtr  *toop = t->isa_instptr();
      const TypeKlassPtr *tkls = t->isa_klassptr();
      ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
      if( klass && klass->is_loaded() && klass->is_interface() ) {
        s2.print("  Interface:");
      } else if( toop ) {
        s2.print("  Oop:");
      } else if( tkls ) {
        s2.print("  Klass:");
      }
      t->dump_on(&s2);
    } else if( t == Type::MEMORY ) {
      s2.print("  Memory:");
      MemNode::dump_adr_type(node, node->adr_type(), &s2);
    }
    assert(s2.size() < sizeof(buffer), "size in range");
    print_prop("dump_spec", buffer);
    if (node->is_block_proj()) {
      print_prop("is_block_proj", "true");
    }
    if (node->is_block_start()) {
      print_prop("is_block_start", "true");
    }
    const char *short_name = "short_name";
    if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) {
      int index = node->as_Proj()->_con - TypeFunc::Parms;
      if (index >= 10) {
        print_prop(short_name, "PA");
      } else {
        sprintf(buffer, "P%d", index);
        print_prop(short_name, buffer);
      }
    } else if (strcmp(node->Name(), "IfTrue") == 0) {
      print_prop(short_name, "T");
    } else if (strcmp(node->Name(), "IfFalse") == 0) {
      print_prop(short_name, "F");
    } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) {
      if (t->base() == Type::Int && t->is_int()->is_con()) {
        const TypeInt *typeInt = t->is_int();
        assert(typeInt->is_con(), "must be constant");
        jint value = typeInt->get_con();
        if (value >= -9 && value <= 99) {
          sprintf(buffer, "%d", value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "I");
        }
      } else if (t == Type::TOP) {
        print_prop(short_name, "^");
      } else if (t->base() == Type::Long && t->is_long()->is_con()) {
        const TypeLong *typeLong = t->is_long();
        assert(typeLong->is_con(), "must be constant");
        jlong value = typeLong->get_con();
        if (value >= -9 && value <= 99) {
          sprintf(buffer, JLONG_FORMAT, value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "L");
        }
      } else if (t->base() == Type::KlassPtr) {
        print_prop(short_name, "CP");
      } else if (t->base() == Type::Control) {
        print_prop(short_name, "C");
      } else if (t->base() == Type::Memory) {
        print_prop(short_name, "M");
      } else if (t->base() == Type::Abio) {
        print_prop(short_name, "IO");
      } else if (t->base() == Type::Return_Address) {
        print_prop(short_name, "RA");
      } else if (t->base() == Type::AnyPtr) {
        print_prop(short_name, "P");
      } else if (t->base() == Type::RawPtr) {
        print_prop(short_name, "RP");
      } else if (t->base() == Type::AryPtr) {
        print_prop(short_name, "AP");
      }
    }
    JVMState* caller = NULL;
    if (node->is_SafePoint()) {
      caller = node->as_SafePoint()->jvms();
    } else {
      Node_Notes* notes = C->node_notes_at(node->_idx);
      if (notes != NULL) {
        caller = notes->jvms();
      }
    }
    if (caller != NULL) {
      stringStream bciStream;
      ciMethod* last = NULL;
      int last_bci = -1;
      while(caller) {
        if (caller->has_method()) {
          last = caller->method();
          last_bci = caller->bci();
        }
        bciStream.print("%d ", caller->bci());
        caller = caller->caller();
      }
      print_prop("bci", bciStream.as_string());
      if (last != NULL && last->has_linenumber_table() && last_bci >= 0) {
        print_prop("line", last->line_number_from_bci(last_bci));
      }
    }
#ifdef ASSERT
    if (node->debug_orig() != NULL) {
      temp_set->Clear();
      stringStream dorigStream;
      Node* dorig = node->debug_orig();
      while (dorig && !temp_set->test_set(dorig->_idx)) {
        dorigStream.print("%d ", dorig->_idx);
        dorig = dorig->debug_orig();  // follow the chain; the set guards against cycles
      }
      print_prop("debug_orig", dorigStream.as_string());
    }
#endif
    if (_chaitin && _chaitin != (PhaseChaitin *)((intptr_t)0xdeadbeef)) {
      buffer[0] = 0;
      _chaitin->dump_register(node, buffer);
      print_prop("reg", buffer);
      uint lrg_id = 0;
      if (node->_idx < _chaitin->_lrg_map.size()) {
        lrg_id = _chaitin->_lrg_map.live_range_id(node);
      }
      print_prop("lrg", lrg_id);
    }
    Compile::current()->_in_dump_cnt--;
#endif
    tail(PROPERTIES_ELEMENT);
    tail(NODE_ELEMENT);
  }
}
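// Worklist traversal from 'start' (plus every scheduled node when a CFG
// exists), following inputs and, when _traverse_outs is set, def-use edges
// as well; visit_node() is applied to each popped node.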
void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set) {
  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  nodeStack.push(start);
  visited.test_set(start->_idx);
  if (C->cfg() != NULL) {
    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
      Block* block = C->cfg()->get_block(i);
      for (uint s = 0; s < block->number_of_nodes(); s++) {
        nodeStack.push(block->get_node(s));
      }
    }
  }
  while(nodeStack.length() > 0) {
    Node *n = nodeStack.pop();
    visit_node(n, edges, temp_set);
    if (_traverse_outs) {
      for (DUIterator i = n->outs(); n->has_out(i); i++) {
        Node* p = n->out(i);
        if (!visited.test_set(p->_idx)) {
          nodeStack.push(p);
        }
      }
    }
    for ( uint i = 0; i < n->len(); i++ ) {
      if ( n->in(i) ) {
        if (!visited.test_set(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }
  }
}
void IdealGraphPrinter::print_method(Compile* compile, const char *name, int level, bool clear_nodes) {
  print(compile, name, (Node *)compile->root(), level, clear_nodes);
}
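// Prints one <graph> element: all nodes, then all edges, then (when a CFG
// is available) the control flow with per-block successor and node lists.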
void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, int level, bool clear_nodes) {
  if (!_current_method || !_should_send_method || level > PrintIdealGraphLevel) return;
  this->C = compile;
  _chaitin = (PhaseChaitin *)C->regalloc();
  begin_head(GRAPH_ELEMENT);
  print_attr(GRAPH_NAME_PROPERTY, (const char *)name);
  end_head();
  VectorSet temp_set(Thread::current()->resource_area());
  head(NODES_ELEMENT);
  walk_nodes(node, false, &temp_set);
  tail(NODES_ELEMENT);
  head(EDGES_ELEMENT);
  walk_nodes(node, true, &temp_set);
  tail(EDGES_ELEMENT);
  if (C->cfg() != NULL) {
    head(CONTROL_FLOW_ELEMENT);
    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
      Block* block = C->cfg()->get_block(i);
      begin_head(BLOCK_ELEMENT);
      print_attr(BLOCK_NAME_PROPERTY, block->_pre_order);
      end_head();
      head(SUCCESSORS_ELEMENT);
      for (uint s = 0; s < block->_num_succs; s++) {
        begin_elem(SUCCESSOR_ELEMENT);
        print_attr(BLOCK_NAME_PROPERTY, block->_succs[s]->_pre_order);
        end_elem();
      }
      tail(SUCCESSORS_ELEMENT);
      head(NODES_ELEMENT);
      for (uint s = 0; s < block->number_of_nodes(); s++) {
        begin_elem(NODE_ELEMENT);
        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
        end_elem();
      }
      tail(NODES_ELEMENT);
      tail(BLOCK_ELEMENT);
    }
    tail(CONTROL_FLOW_ELEMENT);
  }
  tail(GRAPH_ELEMENT);
  output()->flush();
}
extern const char *NodeClassNames[];
outputStream *IdealGraphPrinter::output() {
  return _xml;
}
#endif
C:\hotspot-69087d08d473\src\share\vm/opto/idealGraphPrinter.hpp
#ifndef SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP
#define SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
#include "utilities/xmlstream.hpp"
#ifndef PRODUCT
class Compile;
class PhaseIFG;
class PhaseChaitin;
class Matcher;
class Node;
class InlineTree;
class ciMethod;
class IdealGraphPrinter : public CHeapObj<mtCompiler> {
 private:
  static const char *INDENT;
  static const char *TOP_ELEMENT;
  static const char *GROUP_ELEMENT;
  static const char *GRAPH_ELEMENT;
  static const char *PROPERTIES_ELEMENT;
  static const char *EDGES_ELEMENT;
  static const char *PROPERTY_ELEMENT;
  static const char *EDGE_ELEMENT;
  static const char *NODE_ELEMENT;
  static const char *NODES_ELEMENT;
  static const char *CONTROL_FLOW_ELEMENT;
  static const char *REMOVE_EDGE_ELEMENT;
  static const char *REMOVE_NODE_ELEMENT;
  static const char *METHOD_NAME_PROPERTY;
  static const char *BLOCK_NAME_PROPERTY;
  static const char *BLOCK_DOMINATOR_PROPERTY;
  static const char *BLOCK_ELEMENT;
  static const char *SUCCESSORS_ELEMENT;
  static const char *SUCCESSOR_ELEMENT;
  static const char *METHOD_IS_PUBLIC_PROPERTY;
  static const char *METHOD_IS_STATIC_PROPERTY;
  static const char *TRUE_VALUE;
  static const char *NODE_NAME_PROPERTY;
  static const char *EDGE_NAME_PROPERTY;
  static const char *NODE_ID_PROPERTY;
  static const char *FROM_PROPERTY;
  static const char *TO_PROPERTY;
  static const char *PROPERTY_NAME_PROPERTY;
  static const char *GRAPH_NAME_PROPERTY;
  static const char *INDEX_PROPERTY;
  static const char *METHOD_ELEMENT;
  static const char *INLINE_ELEMENT;
  static const char *BYTECODES_ELEMENT;
  static const char *METHOD_BCI_PROPERTY;
  static const char *METHOD_SHORT_NAME_PROPERTY;
  static const char *ASSEMBLY_ELEMENT;
  elapsedTimer _walk_time;
  elapsedTimer _output_time;
  elapsedTimer _build_blocks_time;
  static int _file_count;
  networkStream *_stream;
  xmlStream *_xml;
  outputStream *_output;
  ciMethod *_current_method;
  int _depth;
  char buffer[128];
  bool _should_send_method;
  PhaseChaitin* _chaitin;
  bool _traverse_outs;
  Compile *C;
  static void pre_node(Node* node, void *env);
  static void post_node(Node* node, void *env);
  void print_indent();
  void print_method(ciMethod *method, int bci, InlineTree *tree);
  void print_inline_tree(InlineTree *tree);
  void visit_node(Node *n, bool edges, VectorSet* temp_set);
  void walk_nodes(Node *start, bool edges, VectorSet* temp_set);
  void begin_elem(const char *s);
  void end_elem();
  void begin_head(const char *s);
  void end_head();
  void print_attr(const char *name, const char *val);
  void print_attr(const char *name, intptr_t val);
  void print_prop(const char *name, const char *val);
  void print_prop(const char *name, int val);
  void tail(const char *name);
  void head(const char *name);
  void text(const char *s);
  intptr_t get_node_id(Node *n);
  IdealGraphPrinter();
  ~IdealGraphPrinter();
 public:
  static void clean_up();
  static IdealGraphPrinter *printer();
  bool traverse_outs();
  void set_traverse_outs(bool b);
  outputStream *output();
  void print_inlining(Compile* compile);
  void begin_method(Compile* compile);
  void end_method();
  void print_method(Compile* compile, const char *name, int level=1, bool clear_nodes = false);
  void print(Compile* compile, const char *name, Node *root, int level=1, bool clear_nodes = false);
  void print_xml(const char *name);
};
#endif
#endif // SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/idealKit.cpp
#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/runtime.hpp"
const uint IdealKit::first_var = TypeFunc::Parms + 1;
IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarations) :
  _gvn(gkit->gvn()), C(gkit->C) {
  _initial_ctrl = gkit->control();
  _initial_memory = gkit->merged_memory();
  _initial_i_o = gkit->i_o();
  _delay_all_transforms = delay_all_transforms;
  _var_ct = 0;
  _cvstate = NULL;
  assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
  assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
  int init_size = 5;
  _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
  DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
  if (!has_declarations) {
     declarations_done();
  }
}
void IdealKit::sync_kit(GraphKit* gkit) {
  set_all_memory(gkit->merged_memory());
  set_i_o(gkit->i_o());
  set_ctrl(gkit->control());
}
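// Structured control flow: if_then() emits the comparison and the If node,
// saves a cvstate for the else/join path on _pending_cvstates, and leaves
// the current cvstate on the taken branch; else_() swaps the two states;
// end_if() merges them back together at a label.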
void IdealKit::if_then(Node* left, BoolTest::mask relop,
                       Node* right, float prob, float cnt, bool push_new_state) {
  assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new If");
  Node* bol;
  if (left->bottom_type()->isa_ptr() == NULL) {
    if (left->bottom_type()->isa_int() != NULL) {
      bol = Bool(CmpI(left, right), relop);
    } else {
      assert(left->bottom_type()->isa_long() != NULL, "what else?");
      bol = Bool(CmpL(left, right), relop);
    }
  } else {
    bol = Bool(CmpP(left, right), relop);
  }
  IfNode* iff = delay_transform(new (C) IfNode(ctrl(), bol, prob, cnt))->as_If();
  Node* then  = IfTrue(iff);
  Node* elsen = IfFalse(iff);
  Node* else_cvstate = copy_cvstate();
  else_cvstate->set_req(TypeFunc::Control, elsen);
  _pending_cvstates->push(else_cvstate);
  DEBUG_ONLY(if (push_new_state) _state->push(IfThenS));
  set_ctrl(then);
}
void IdealKit::else_() {
  assert(state() == IfThenS, "bad state for new Else");
  Node* else_cvstate = _pending_cvstates->pop();
  DEBUG_ONLY(_state->pop());
  _pending_cvstates->push(_cvstate);
  DEBUG_ONLY(_state->push(ElseS));
  _cvstate = else_cvstate;
}
void IdealKit::end_if() {
  assert(state() & (IfThenS|ElseS), "bad state for new Endif");
  Node* lab = make_label(1);
  goto_(lab);
  _cvstate = _pending_cvstates->pop();
  bind(lab);
  DEBUG_ONLY(_state->pop());
}
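// A minimal usage sketch (hypothetical caller, for illustration only;
// 'kit', 'adr' and 'alias_idx' are assumed to exist in the caller and are
// not taken from any real call site):
//
//   IdealKit ideal(kit);
//   Node* val = ideal.load(ideal.ctrl(), adr, TypeInt::INT, T_INT, alias_idx);
//   ideal.if_then(val, BoolTest::ne, ideal.ConI(0)); {
//     // taken branch: build nodes with ideal.store(), ideal.AddI(), ...
//   } ideal.else_(); {
//     // not-taken branch
//   } ideal.end_if();
//
// Afterwards the caller resynchronizes its GraphKit with the IdealKit's
// final control, memory and i_o state.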
void IdealKit::loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) {
  assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new loop");
  if (UseLoopPredicate) {
    gkit->sync_kit(*this);
    gkit->add_predicate(nargs);
    sync_kit(gkit);
  }
  set(iv, init);
  Node* head = make_label(1);
  bind(head);
  _pending_cvstates->push(head); // push for use at end_loop
  _cvstate = copy_cvstate();
  if_then(value(iv), relop, limit, prob, cnt, false /* no new state */);
  DEBUG_ONLY(_state->push(LoopS));
  assert(ctrl()->is_IfTrue(), "true branch stays in loop");
  assert(_pending_cvstates->top()->in(TypeFunc::Control)->is_IfFalse(), "false branch exits loop");
}
void IdealKit::end_loop() {
  assert((state() == LoopS), "bad state for new end_loop");
  Node* exit = _pending_cvstates->pop();
  Node* head = _pending_cvstates->pop();
  goto_(head);
  clear(head);
  DEBUG_ONLY(_state->pop());
  _cvstate = exit;
}
Node* IdealKit::make_label(int goto_ct) {
  assert(_cvstate != NULL, "must declare variables before labels");
  Node* lab = new_cvstate();
  int sz = 1 + goto_ct + 1 /* fall thru */;
  Node* reg = delay_transform(new (C) RegionNode(sz));
  lab->init_req(TypeFunc::Control, reg);
  return lab;
}
void IdealKit::bind(Node* lab) {
  goto_(lab, true /* bind */);
  _cvstate = lab;
}
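// Transfers control to label 'lab'. The current control is wired into the
// label's region; variables that differ on distinct incoming paths are
// promoted to phis on that region. With bind == true the label is being
// bound here, so values are promoted eagerly.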
void IdealKit::goto_(Node* lab, bool bind) {
  Node* reg = lab->in(TypeFunc::Control);
  uint slot = 1;
  while (slot < reg->req() && reg->in(slot) != NULL) slot++;
  assert(slot < reg->req(), "too many gotos");
  if (slot == reg->req() - 1) bind = false;
  reg->init_req(slot, ctrl());
  assert(first_var + _var_ct == _cvstate->req(), "bad _cvstate size");
  for (uint i = first_var; i < _cvstate->req(); i++) {
    Node* l = lab->in(i);
    Node* m = _cvstate->in(i);
    if (m == NULL) {
      continue;
    } else if (l == NULL || m == l) {
      if (bind) {
        m = promote_to_phi(m, reg);
      }
      lab->set_req(i, m);
    } else {
      if (!was_promoted_to_phi(l, reg)) {
        l = promote_to_phi(l, reg);
        lab->set_req(i, l);
      }
      l->set_req(slot, m);
    }
  }
  do_memory_merge(_cvstate, lab);
  stop();
}
Node* IdealKit::promote_to_phi(Node* n, Node* reg) {
  assert(!was_promoted_to_phi(n, reg), "n already promoted to phi on this region");
  const BasicType bt = n->bottom_type()->basic_type();
  const Type* ct = Type::get_const_basic_type(bt);
  return delay_transform(PhiNode::make(reg, n, ct));
}
void IdealKit::declarations_done() {
  _cvstate = new_cvstate();   // initialize current cvstate
  set_ctrl(_initial_ctrl);    // initialize control in current cvstate
  set_all_memory(_initial_memory);// initialize memory in current cvstate
  set_i_o(_initial_i_o);      // initialize i_o in current cvstate
  DEBUG_ONLY(_state->push(BlockS));
}
Node* IdealKit::transform(Node* n) {
  if (_delay_all_transforms) {
    return delay_transform(n);
  } else {
    n = gvn().transform(n);
    C->record_for_igvn(n);
    return n;
  }
}
Node* IdealKit::delay_transform(Node* n) {
  gvn().set_type(n, n->bottom_type());
  C->record_for_igvn(n);
  return n;
}
Node* IdealKit::new_cvstate() {
  uint sz = _var_ct + first_var;
  return new (C) Node(sz);
}
Node* IdealKit::copy_cvstate() {
  Node* ns = new_cvstate();
  for (uint i = 0; i < ns->req(); i++) ns->init_req(i, _cvstate->in(i));
  ns->set_req(TypeFunc::Memory, MergeMemNode::make(C, ns->in(TypeFunc::Memory)));
  return ns;
}
void IdealKit::clear(Node* m) {
  for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
}
IdealVariable::IdealVariable(IdealKit &k) {
  k.declare(this);
}
Node* IdealKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}
void IdealKit::set_memory(Node* mem, uint alias_idx) {
  merged_memory()->set_memory_at(alias_idx, mem);
}
Node* IdealKit::load(Node* ctl,
                     Node* adr,
                     const Type* t,
                     BasicType bt,
                     int adr_idx,
                     bool require_atomic_access) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, MemNode::unordered);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, MemNode::unordered);
  }
  return transform(ld);
}
Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
                      int adr_idx,
                      MemNode::MemOrd mo, bool require_atomic_access,
                      bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st;
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  st = transform(st);
  set_memory(st, adr_idx);
  return st;
}
Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store, int oop_adr_idx,
                        BasicType bt,
                        int adr_idx) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st = new (C) StoreCMNode(ctl, mem, adr, adr_type, val, oop_store, oop_adr_idx);
  st = transform(st);
  set_memory(st, adr_idx);
  return st;
}
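// Merges the memory and I/O state of 'merging' into the join point: the
// first arrival simply donates its state; later arrivals turn any differing
// memory slice (or the I/O edge) into a phi on the join region and fill in
// their slot.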
void IdealKit::do_memory_merge(Node* merging, Node* join) {
  Node* join_region = join->in(TypeFunc::Control);
  assert(join_region != NULL, "join region must exist");
  if (join->in(TypeFunc::I_O) == NULL ) {
    join->set_req(TypeFunc::I_O,  merging->in(TypeFunc::I_O));
  }
  if (join->in(TypeFunc::Memory) == NULL ) {
    join->set_req(TypeFunc::Memory,  merging->in(TypeFunc::Memory));
    return;
  }
  uint slot;
  for (slot = 1; slot < join_region->req() ; slot ++ ) {
    if (join_region->in(slot) == merging->in(TypeFunc::Control)) break;
  }
  assert(slot !=  join_region->req(), "edge must already exist");
  MergeMemNode* join_m    = join->in(TypeFunc::Memory)->as_MergeMem();
  MergeMemNode* merging_m = merging->in(TypeFunc::Memory)->as_MergeMem();
  for (MergeMemStream mms(join_m, merging_m); mms.next_non_empty2(); ) {
    Node* join_slice = mms.force_memory();
    Node* merging_slice = mms.memory2();
    if (join_slice != merging_slice) {
      PhiNode* phi;
      if (join_slice->is_Phi() && join_slice->as_Phi()->region() == join_region) {
        phi = join_slice->as_Phi();
      } else {
        phi = PhiNode::make(join_region, join_slice, Type::MEMORY, mms.adr_type(C));
        phi = (PhiNode*) delay_transform(phi);
      }
      phi->set_req(slot, merging_slice/* slow_path, slow_slice */);
      mms.set_memory(phi);
    }
  }
  Node* join_io    = join->in(TypeFunc::I_O);
  Node* merging_io = merging->in(TypeFunc::I_O);
  if (join_io != merging_io) {
    PhiNode* phi;
    if (join_io->is_Phi() && join_io->as_Phi()->region() == join_region) {
      phi = join_io->as_Phi();
    } else {
      phi = PhiNode::make(join_region, join_io, Type::ABIO);
      phi = (PhiNode*) delay_transform(phi);
      join->set_req(TypeFunc::I_O, phi);
    }
    phi->set_req(slot, merging_io);
  }
}
void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
                              address slow_call,
                              const char *leaf_name,
                              Node* parm0,
                              Node* parm1,
                              Node* parm2,
                              Node* parm3) {
  const TypePtr* adr_type = TypeRawPtr::BOTTOM;
  uint adr_idx = C->get_alias_index(adr_type);
  CallNode *call =  (CallNode*)new (C) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
  call->init_req( TypeFunc::Control, ctrl() );
  call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  call->init_req( TypeFunc::Memory , memory(adr_idx));
  call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
  call->init_req( TypeFunc::ReturnAdr, top() );
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL)  call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL)  call->init_req(TypeFunc::Parms+3, parm3);
  call = (CallNode *) _gvn.transform(call);
  Node *c = call; // dbx gets confused with call call->dump()
  set_ctrl(transform( new (C) ProjNode(call,TypeFunc::Control) ));
  Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );
  set_memory(mem, adr_idx);
  assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
         "call node must be constructed correctly");
}
void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type,
                              address slow_call,
                              const char *leaf_name,
                              const TypePtr* adr_type,
                              Node* parm0,
                              Node* parm1,
                              Node* parm2,
                              Node* parm3) {
  uint adr_idx = C->get_alias_index(adr_type);
  CallNode *call =  (CallNode*)new (C) CallLeafNoFPNode( slow_call_type, slow_call, leaf_name, adr_type);
  call->init_req( TypeFunc::Control, ctrl() );
  call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  call->init_req( TypeFunc::Memory , memory(adr_idx));
  call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
  call->init_req( TypeFunc::ReturnAdr, top() );
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL)  call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL)  call->init_req(TypeFunc::Parms+3, parm3);
  call = (CallNode *) _gvn.transform(call);
  Node *c = call; // dbx gets confused with call call->dump()
  set_ctrl(transform( new (C) ProjNode(call,TypeFunc::Control) ));
  Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );
  set_memory(mem, adr_idx);
  assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
         "call node must be constructed correctly");
}
C:\hotspot-69087d08d473\src\share\vm/opto/idealKit.hpp
#ifndef SHARE_VM_OPTO_IDEALKIT_HPP
#define SHARE_VM_OPTO_IDEALKIT_HPP
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
class IdealKit;
class IdealVariable: public StackObj {
 friend class IdealKit;
 private:
  int _id;
  void set_id(int id) { _id = id; }
 public:
  IdealVariable(IdealKit &k);
  int id() { assert(has_id(),"uninitialized id"); return _id; }
  bool has_id() { return _id >= 0; }
};
class IdealKit: public StackObj {
 friend class IdealVariable;
 protected:
  Compile * const C;
  PhaseGVN &_gvn;
  GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
  Node* _cvstate;                          // current cvstate (control, memory and variables)
  uint _var_ct;                            // number of variables
  bool _delay_all_transforms;              // flag forcing all transforms to be delayed
  Node* _initial_ctrl;                     // saves initial control until variables declared
  Node* _initial_memory;                   // saves initial memory  until variables declared
  Node* _initial_i_o;                      // saves initial i_o  until variables declared
  PhaseGVN& gvn() const { return _gvn; }
  Node* new_cvstate();                     // Create a new cvstate
  Node* cvstate() { return _cvstate; }     // current cvstate
  Node* copy_cvstate();                    // copy current cvstate
  void set_memory(Node* mem, uint alias_idx );
  void do_memory_merge(Node* merging, Node* join);
  void clear(Node* m);                     // clear a cvstate
  void stop() { clear(_cvstate); }         // clear current cvstate
  Node* delay_transform(Node* n);
  Node* transform(Node* n);                // gvn.transform or skip it
  Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
  bool was_promoted_to_phi(Node* n, Node* reg) {
    return (n->is_Phi() && n->in(0) == reg);
  }
  void declare(IdealVariable* v) { v->set_id(_var_ct++); }
  static const uint first_var; // = TypeFunc::Parms + 1;
#ifdef ASSERT
  enum State { NullS=0, BlockS=1, LoopS=2, IfThenS=4, ElseS=8, EndifS= 16 };
  GrowableArray<int>* _state;
  State state() { return (State)(_state->top()); }
#endif
  Node* memory(uint alias_idx);
 public:
  IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false);
  ~IdealKit() {
    stop();
  }
  void sync_kit(GraphKit* gkit);
  Node* ctrl()                          { return _cvstate->in(TypeFunc::Control); }
  void set_ctrl(Node* ctrl)             { _cvstate->set_req(TypeFunc::Control, ctrl); }
  Node* top()                           { return C->top(); }
  MergeMemNode* merged_memory()         { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
  void set_all_memory(Node* mem)        { _cvstate->set_req(TypeFunc::Memory, mem); }
  Node* i_o()                           { return _cvstate->in(TypeFunc::I_O); }
  void set_i_o(Node* c)                 { _cvstate->set_req(TypeFunc::I_O, c); }
  void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
  Node* value(IdealVariable& v)         { return _cvstate->in(first_var + v.id()); }
  void dead(IdealVariable& v)           { set(v, (Node*)NULL); }
  void if_then(Node* left, BoolTest::mask relop, Node* right,
               float prob = PROB_FAIR, float cnt = COUNT_UNKNOWN,
               bool push_new_state = true);
  void else_();
  void end_if();
  void loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit,
            float prob = PROB_LIKELY(0.9), float cnt = COUNT_UNKNOWN);
  void end_loop();
  Node* make_label(int goto_ct);
  void bind(Node* lab);
  void goto_(Node* lab, bool bind = false);
  void declarations_done();
  Node* IfTrue(IfNode* iff)  { return transform(new (C) IfTrueNode(iff)); }
  Node* IfFalse(IfNode* iff) { return transform(new (C) IfFalseNode(iff)); }
  Node* ConI(jint k) { return (Node*)gvn().intcon(k); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* AddI(Node* l, Node* r) { return transform(new (C) AddINode(l, r)); }
  Node* SubI(Node* l, Node* r) { return transform(new (C) SubINode(l, r)); }
  Node* AndI(Node* l, Node* r) { return transform(new (C) AndINode(l, r)); }
  Node* MaxI(Node* l, Node* r) { return transform(new (C) MaxINode(l, r)); }
  Node* LShiftI(Node* l, Node* r) { return transform(new (C) LShiftINode(l, r)); }
  Node* CmpI(Node* l, Node* r) { return transform(new (C) CmpINode(l, r)); }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return transform(new (C) BoolNode(cmp, relop)); }
  void  increment(IdealVariable& v, Node* j)  { set(v, AddI(value(v), j)); }
  void  decrement(IdealVariable& v, Node* j)  { set(v, SubI(value(v), j)); }
  Node* CmpL(Node* l, Node* r) { return transform(new (C) CmpLNode(l, r)); }
  Node* thread()  {  return gvn().transform(new (C) ThreadLocalNode()); }
  Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new (C) AddPNode(base, ptr, off)); }
  Node* CmpP(Node* l, Node* r) { return transform(new (C) CmpPNode(l, r)); }
#ifdef _LP64
  Node* XorX(Node* l, Node* r) { return transform(new (C) XorLNode(l, r)); }
#else // _LP64
  Node* XorX(Node* l, Node* r) { return transform(new (C) XorINode(l, r)); }
#endif // _LP64
  Node* URShiftX(Node* l, Node* r) { return transform(new (C) URShiftXNode(l, r)); }
  Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
  Node* CastPX(Node* ctl, Node* p) { return transform(new (C) CastP2XNode(ctl, p)); }
  Node* load(Node* ctl,
             Node* adr,
             const Type* t,
             BasicType bt,
             int adr_idx,
             bool require_atomic_access = false);
  Node* store(Node* ctl,
              Node* adr,
              Node* val,
              BasicType bt,
              int adr_idx,
              MemNode::MemOrd mo,
              bool require_atomic_access = false,
              bool mismatched = false
              );
  Node* storeCM(Node* ctl,
                Node* adr,
                Node* val,
                Node* oop_store,
                int oop_adr_idx,
                BasicType bt,
                int adr_idx);
  void make_leaf_call(const TypeFunc *slow_call_type,
                      address slow_call,
                      const char *leaf_name,
                      Node* parm0,
                      Node* parm1 = NULL,
                      Node* parm2 = NULL,
                      Node* parm3 = NULL);
  void make_leaf_call_no_fp(const TypeFunc *slow_call_type,
                            address slow_call,
                            const char *leaf_name,
                            const TypePtr* adr_type,
                            Node* parm0,
                            Node* parm1,
                            Node* parm2,
                            Node* parm3);
};
#endif // SHARE_VM_OPTO_IDEALKIT_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/ifg.cpp
#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}
void PhaseIFG::init( uint maxlrg ) {
  _maxlrg = maxlrg;
  _yanked = new (_arena) VectorSet(_arena);
  _is_square = false;
  _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  memset(_lrgs,0,sizeof(LRG)*maxlrg);
  for( uint i = 0; i < maxlrg; i++ ) {
    _adjs[i].initialize(maxlrg);
    _lrgs[i].Set_All();
  }
}
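// The interference graph starts out triangular: an edge (a,b) is stored
// only in the adjacency set of the larger index. SquareUp() later mirrors
// every edge so both endpoints list each other.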
int PhaseIFG::add_edge( uint a, uint b ) {
  lrgs(a).invalid_degree();
  lrgs(b).invalid_degree();
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].insert( b );
}
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  assert( !_is_square, "only on triangular" );
  IndexSet *adjs_a = &_adjs[a];
  if( !vec->count() ) return;
  IndexSetIterator elements(vec);
  uint neighbor;
  while ((neighbor = elements.next()) != 0) {
    add_edge( a, neighbor );
  }
}
int PhaseIFG::test_edge( uint a, uint b ) const {
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}
void PhaseIFG::SquareUp() {
  assert( !_is_square, "only on triangular" );
  for( uint i = 0; i < _maxlrg; i++ ) {
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      _adjs[datum].insert( i );
    }
  }
  _is_square = true;
}
void PhaseIFG::Compute_Effective_Degree() {
  assert( _is_square, "only on square" );
  for( uint i = 0; i < _maxlrg; i++ )
    lrgs(i).set_degree(effective_degree(i));
}
int PhaseIFG::test_edge_sq( uint a, uint b ) const {
  assert( _is_square, "only on square" );
  if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}
void PhaseIFG::Union( uint a, uint b ) {
  assert( _is_square, "only on square" );
  IndexSet *A = &_adjs[a];
  IndexSetIterator b_elements(&_adjs[b]);
  uint datum;
  while ((datum = b_elements.next()) != 0) {
    if(A->insert(datum)) {
      _adjs[datum].insert(a);
      lrgs(a).invalid_degree();
      lrgs(datum).invalid_degree();
    }
  }
}
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );
  _yanked->set(a);
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  return neighbors(a);
}
void PhaseIFG::re_insert( uint a ) {
  assert( _is_square, "only on square" );
  assert( _yanked->test(a), "" );
  (*_yanked) >>= a;
  IndexSetIterator elements(&_adjs[a]);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].insert(a);
    lrgs(datum).invalid_degree();
  }
}
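// Degree contribution of neighbor 'l': fat-projs interfere with many
// registers at once, so either live range being a fat-proj costs the
// product of the register counts; otherwise the maximum suffices.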
int LRG::compute_degree( LRG &l ) const {
  int tmp;
  int num_regs = _num_regs;
  int nregs = l.num_regs();
  tmp =  (_fat_proj || l._fat_proj)     // either is a fat-proj?
    ? (num_regs * nregs)                // then use product
    : MAX2(num_regs,nregs);             // else use max
  return tmp;
}
int PhaseIFG::effective_degree( uint lidx ) const {
  int eff = 0;
  int num_regs = lrgs(lidx).num_regs();
  int fat_proj = lrgs(lidx)._fat_proj;
  IndexSet *s = neighbors(lidx);
  IndexSetIterator elements(s);
  uint nidx;
  while((nidx = elements.next()) != 0) {
    LRG &lrgn = lrgs(nidx);
    int nregs = lrgn.num_regs();
    eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
      ? (num_regs * nregs)              // then use product
      : MAX2(num_regs,nregs);           // else use max
  }
  return eff;
}
#ifndef PRODUCT
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : "  ");
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");
    }
    return;
  }
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : "  ");
    tty->print("L%d: { ",i);
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}
void PhaseIFG::stats() const {
  ResourceMark rm;
  int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
  uint i;
  for( i = 0; i < _maxlrg; i++ ) {
    h_cnt[neighbor_cnt(i)]++;
  }
  tty->print_cr("--Histogram of counts--");
  for( i = 0; i < _maxlrg*2; i++ )
    if( h_cnt[i] )
      tty->print("%d/%d ",i,h_cnt[i]);
  tty->cr();
}
void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  for( uint i = 0; i < _maxlrg; i++ ) {
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert(idx != i, "Must have empty diagonal");
      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
      assert(_adjs[idx].member(i), "IFG not square");
      assert(!(*_yanked)[idx], "No yanked neighbors");
      assert(last < idx, "not sorted increasing");
      last = idx;
    }
    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
  }
}
#endif
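// Adds an interference edge between 'r' and every currently-live range
// whose register mask overlaps r's mask; non-overlapping masks can never
// compete for a register, so no edge is needed.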
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
  const RegMask &rm = lrgs(r).mask();
  IndexSetIterator elements(liveout);
  uint l;
  while( (l = elements.next()) != 0 )
    if( rm.overlap( lrgs(l).mask() ) )
      _ifg->add_edge( r, l );
}
void PhaseChaitin::build_ifg_virtual( ) {
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    IndexSet* liveout = _live->live(block);
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);
      uint r = _lrg_map.live_range_id(n);
      if (r) {
        liveout->remove(r);
        uint idx = n->is_Copy();
        if (idx) {
          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
        }
        interfere_with_live(r, liveout);
      }
      if (!n->is_Phi()) {      // Phi function uses come from prior block
        for(uint k = 1; k < n->req(); k++) {
          liveout->insert(_lrg_map.live_range_id(n->in(k)));
        }
      }
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        uint op = mach->ideal_Opcode();
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        uint lidx = _lrg_map.live_range_id(n->in(idx));
        for (uint k = 1; k < n->req(); k++) {
          uint kidx = _lrg_map.live_range_id(n->in(k));
          if (kidx != lidx) {
            _ifg->add_edge(r, kidx);
          }
        }
      }
    } // End of forall instructions in block
  } // End of forall blocks
}
uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        !lrgs(lidx)._is_float &&
        !lrgs(lidx)._is_vector &&
        lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}
uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        (lrgs(lidx)._is_float || lrgs(lidx)._is_vector))
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}
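// A value leaving the live set lowers register pressure. pressure[0]
// tracks integer registers and pressure[1] float/vector registers;
// hrp_index[] remembers the instruction at which pressure last dropped
// back to the INTPRESSURE/FLOATPRESSURE threshold.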
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  if (lrg->mask().is_UP() && lrg->mask_size()) {
    if (lrg->_is_float || lrg->_is_vector) {
      pressure[1] -= lrg->reg_pressure();
      if( pressure[1] == (uint)FLOATPRESSURE ) {
        hrp_index[1] = where;
        if( pressure[1] > b->_freg_pressure )
          b->_freg_pressure = pressure[1]+1;
      }
    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
      pressure[0] -= lrg->reg_pressure();
      if( pressure[0] == (uint)INTPRESSURE   ) {
        hrp_index[0] = where;
        if( pressure[0] > b->_reg_pressure )
          b->_reg_pressure = pressure[0]+1;
      }
    }
  }
}
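// Builds the physical interference graph with a backward walk over each
// block: starting from the live-out set, defs are removed from (and uses
// inserted into) the live set, interferences are recorded against the
// current live set, and spill costs/areas plus the high-register-pressure
// indices are accumulated along the way. Returns the number of live ranges
// that must spill.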
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )
  uint must_spill = 0;
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    IndexSet liveout(_live->live(block));
    uint last_inst = block->end_idx();
    uint first_inst;
    for (first_inst = 1; first_inst < last_inst; first_inst++) {
      if (!block->get_node(first_inst)->is_Phi()) {
        break;
      }
    }
    for (uint insidx = first_inst; insidx < last_inst; insidx++) {
      Node *ex = block->get_node(insidx);
      if (ex->is_SpillCopy()) {
        continue;
      }
      if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        block->remove_node(insidx);
        block->insert_node(ex, first_inst);
      }
      break;
    }
    uint pressure[2], hrp_index[2];
    pressure[0] = pressure[1] = 0;
    hrp_index[0] = hrp_index[1] = last_inst+1;
    block->_reg_pressure = block->_freg_pressure = 0;
    int inst_count = last_inst - first_inst;
    double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
    assert(!(cost < 0.0), "negative spill cost" );
    IndexSetIterator elements(&liveout);
    uint lidx;
    while ((lidx = elements.next()) != 0) {
      LRG &lrg = lrgs(lidx);
      lrg._area += cost;
      if (lrg.mask().is_UP() && lrg.mask_size()) {
        if (lrg._is_float || lrg._is_vector) {   // Count float pressure
          pressure[1] += lrg.reg_pressure();
          if (pressure[1] > block->_freg_pressure) {
            block->_freg_pressure = pressure[1];
          }
        } else if(lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
          pressure[0] += lrg.reg_pressure();
          if (pressure[0] > block->_reg_pressure) {
            block->_reg_pressure = pressure[0];
          }
        }
      }
    }
    assert( pressure[0] == count_int_pressure  (&liveout), "" );
    assert( pressure[1] == count_float_pressure(&liveout), "" );
    uint j;
    for (j = last_inst + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);
      uint r = _lrg_map.live_range_id(n);
      if(r) {
        lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;
        if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
          Node *def = n->in(0);
          if( !n->is_Proj() ||
              (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
            bool remove = true;
            if (n->is_MachProj()) {
              uint cnt = def->outcnt();
              for (uint i = 0; i < cnt; i++) {
                Node* proj = def->raw_out(i);
                if (proj->Opcode() == Op_SCMemProj) {
                  remove = false;
                  break;
                }
              }
            }
            if (remove) {
              block->remove_node(j - 1);
              if (lrgs(r)._def == n) {
                lrgs(r)._def = 0;
              }
              n->disconnect_inputs(NULL, C);
              _cfg.unmap_node_from_block(n);
              n->replace_by(C->top());
              hrp_index[0]--;
              hrp_index[1]--;
              continue;
            }
          }
          if (lrgs(r)._fat_proj) {
            RegMask itmp = lrgs(r).mask();
            itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
            int iregs = itmp.Size();
            if (pressure[0]+iregs > block->_reg_pressure) {
              block->_reg_pressure = pressure[0] + iregs;
            }
            if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
              hrp_index[0] = j - 1;
            }
            RegMask ftmp = lrgs(r).mask();
            ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
            int fregs = ftmp.Size();
            if (pressure[1] + fregs > block->_freg_pressure) {
              block->_freg_pressure = pressure[1] + fregs;
            }
            if(pressure[1] <= (uint)FLOATPRESSURE && pressure[1]+fregs > (uint)FLOATPRESSURE) {
              hrp_index[1] = j - 1;
            }
          }
        } else {                // Else it is live
          lrgs(r)._area -= cost;
          assert(!(lrgs(r)._area < 0.0), "negative spill area" );
          if( n->is_SpillCopy()
              && lrgs(r).is_singledef()        // MultiDef live range can still split
              && n->outcnt() == 1              // and use must be in this block
              && _cfg.get_block_for_node(n->unique_out()) == block) {
            Node *single_use = n->unique_out();
            assert(block->find_node(single_use) >= j, "Use must be later in block");
            Node *m = n;
            for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
              m = block->get_node(i);
            }
            if (m == single_use) {
              lrgs(r)._area = 0.0;
            }
          }
          if( liveout.remove(r) ) {
            lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index);
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }
          uint idx = n->is_Copy();
          if (idx) {
            uint x = _lrg_map.live_range_id(n->in(idx));
            if (liveout.remove(x)) {
              lrgs(x)._area -= cost;
              lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index);
              assert( pressure[0] == count_int_pressure  (&liveout), "" );
              assert( pressure[1] == count_float_pressure(&liveout), "" );
            }
          }
        } // End of if live or not
        const RegMask &rmask = lrgs(r).mask();
        if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
          int r_size = lrgs(r).num_regs();
          OptoReg::Name r_reg = (r_size == 1) ? rmask.find_first_elem() : OptoReg::Physical;
          IndexSetIterator elements(&liveout);
          uint l;
          while ((l = elements.next()) != 0) {
            LRG &lrg = lrgs(l);
            if( lrg._must_spill ) continue;
            RegMask old = lrg.mask();
            uint old_size = lrg.mask_size();
            assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
            if (lrg.num_regs() > 1 && !lrg._fat_proj) {
              RegMask r2mask = rmask;
              r2mask.smear_to_sets(lrg.num_regs());
              lrg.SUBTRACT( r2mask );
              lrg.compute_set_mask_size();
            } else if( r_size != 1 ) { // fat proj
              lrg.SUBTRACT( rmask );
              lrg.compute_set_mask_size();
            } else {            // Common case: size 1 bound removal
              if( lrg.mask().Member(r_reg) ) {
                lrg.Remove(r_reg);
                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
              }
            }
            if( lrg.not_free() ) {
              lrg.set_mask( old );
              lrg.set_mask_size(old_size);
              must_spill++;
              lrg._must_spill = 1;
              lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
            }
          }
        } // End of if bound
        interfere_with_live(r,&liveout);
      } // End of if normal register-allocated value
      inst_count--;
      cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
      if( !n->is_Phi() ) {      // Phi function uses come from prior block
        JVMState* jvms = n->jvms();
        uint debug_start = jvms ? jvms->debug_start() : 999999;
        for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
          Node *def = n->in(k);
          uint x = _lrg_map.live_range_id(def);
          if (!x) {
            continue;
          }
          LRG &lrg = lrgs(x);
          if (k < debug_start) {
            lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq));
          }
          if (liveout.insert(x)) {
            lrg._area += cost;
            if (lrg.mask().is_UP() && lrg.mask_size()) {
              if (lrg._is_float || lrg._is_vector) {
                pressure[1] += lrg.reg_pressure();
                if (pressure[1] > block->_freg_pressure)  {
                  block->_freg_pressure = pressure[1];
                }
              } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
                pressure[0] += lrg.reg_pressure();
                if (pressure[0] > block->_reg_pressure) {
                  block->_reg_pressure = pressure[0];
                }
              }
            }
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }
          assert(!(lrg._area < 0.0), "negative spill area" );
        }
      }
    } // End of reverse pass over all instructions in block
    if (pressure[0] > (uint)INTPRESSURE) {
      hrp_index[0] = 0;
      if (pressure[0] > block->_reg_pressure) {
        block->_reg_pressure = pressure[0];
      }
    }
    if (pressure[1] > (uint)FLOATPRESSURE) {
      hrp_index[1] = 0;
      if (pressure[1] > block->_freg_pressure) {
        block->_freg_pressure = pressure[1];
      }
    }
    j = hrp_index[0];
    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
      Node* cur = block->get_node(j);
      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
        j--;
        cur = block->get_node(j);
      }
    }
    block->_ihrp_index = j;
    j = hrp_index[1];
    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
      Node* cur = block->get_node(j);
      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
        j--;
        cur = block->get_node(j);
      }
    }
    block->_fhrp_index = j;
#ifndef PRODUCT
    if( PrintOptoStatistics ) {
      if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) {
        _high_pressure++;
      } else {
        _low_pressure++;
      }
    }
#endif
  } // End of for all blocks
  return must_spill;
}
C:\hotspot-69087d08d473\src\share\vm/opto/ifnode.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
extern int explicit_null_checks_elided;
const Type *IfNode::Value( PhaseTransform *phase ) const {
  if( !in(0) ) return Type::TOP;
  if( phase->type(in(0)) == Type::TOP )
    return Type::TOP;
  const Type *t = phase->type(in(1));
  if( t == Type::TOP )          // data is undefined
    return TypeTuple::IFNEITHER; // unreachable altogether
  if( t == TypeInt::ZERO )      // zero, or false
    return TypeTuple::IFFALSE;  // only false branch is reachable
  if( t == TypeInt::ONE )       // 1, or true
    return TypeTuple::IFTRUE;   // only true branch is reachable
  assert( t == TypeInt::BOOL, "expected boolean type" );
  return TypeTuple::IFBOTH;     // No progress
}
const RegMask &IfNode::out_RegMask() const {
  return RegMask::Empty;
}
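// Classic split-if: when an If tests a phi that merges constants (or
// known-not-null pointers) against a constant, the region and the test are
// split so the constant inputs branch directly, eliminating the phi and
// folding the compare on those paths. Returns NULL whenever any structural
// restriction is violated.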
static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return NULL;
  BoolNode *b = i1->as_Bool();
  Node *cmp = b->in(1);
  if( !cmp->is_Cmp() ) return NULL;
  i1 = cmp->in(1);
  if( i1 == NULL || !i1->is_Phi() ) return NULL;
  PhiNode *phi = i1->as_Phi();
  if( phi->is_copy() ) return NULL;
  Node *con2 = cmp->in(2);
  if( !con2->is_Con() ) return NULL;
  Node *con1=NULL;
  uint i4;
  for( i4 = 1; i4 < phi->req(); i4++ ) {
    con1 = phi->in(i4);
    if( !con1 ) return NULL;    // Do not optimize partially collapsed merges
    if( con1->is_Con() ) break; // Found a constant
    const TypePtr *tp = igvn->type(con1)->isa_ptr();
    if( tp && tp->_ptr == TypePtr::NotNull )
      break;
  }
  if( i4 >= phi->req() ) return NULL; // Found no constants
  igvn->C->set_has_split_ifs(true); // Has chance for split-if
  Node *cmp2 = cmp->clone();
  cmp2->set_req(1,con1);
  cmp2->set_req(2,con2);
  const Type *t = cmp2->Value(igvn);
  igvn->remove_dead_node(cmp2);
  if( !t->singleton() ) return NULL;
  Node *r = iff->in(0);
  if( !r->is_Region() ) return NULL;
  if( phi->region() != r ) return NULL;
  if (b->outcnt() != 1 || cmp->outcnt() != 1) {
    return NULL;
  }
  for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) {
    Node* u = r->fast_out(j);
    if( u == r ) continue;
    if( u == iff ) continue;
    if( u->outcnt() == 0 ) continue; // use is dead & ignorable
    if( !u->is_Phi() ) {
      // Non-phi use of the region (e.g. an inlined Start); give up.
      return NULL;
    }
    if( u != phi ) {
      return NULL;
    }
    for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) {
      Node* v = u->fast_out(k); // User of the phi
      if( v == cmp ) continue;  // The compare is OK
      if( (v->is_ConstraintCast()) &&
          v->in(0)->in(0) == iff )
        continue;               // CastPP/II of the IfNode is OK
      if( !v->is_Call() ) {
        /* Debug classification of the offending use, disabled:
        if( v->Opcode() == Op_AddP ) {
          tty->print_cr("Phi has AddP use");
        } else if( v->Opcode() == Op_CastPP ) {
          tty->print_cr("Phi has CastPP use");
        } else if( v->Opcode() == Op_CastII ) {
          tty->print_cr("Phi has CastII use");
        } else {
          tty->print_cr("Phi has use I cant be bothered with");
        }
        */
      }
      return NULL;
      /* The checks below are unreachable after the unconditional return
         above and are kept commented out for reference:
      if( !v->in(0) && v != cmp ) {
        tty->print_cr("Phi has free-floating use");
        v->dump(2);
        return NULL;
      }
      for( uint l = 1; l < v->req(); l++ ) {
        if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) &&
            !v->in(l)->is_Con() ) {
          tty->print_cr("Phi has use");
          v->dump(2);
          return NULL;
        } // End of if Phi-use input is neither Phi nor Constant
      } // End of for all inputs to Phi-use
      */
    } // End of for all uses of Phi
  } // End of for all uses of Region
  if (iff->outcnt() != 2)
    return NULL;
  PhaseGVN *phase = igvn;
  uint req_c = 0;
  Node* predicate_proj = NULL;
  for (uint ii = 1; ii < r->req(); ii++) {
    if (phi->in(ii) == con1) {
      req_c++;
    }
    Node* proj = PhaseIdealLoop::find_predicate(r->in(ii));
    if (proj != NULL) {
      assert(predicate_proj == NULL, "only one predicate entry expected");
      predicate_proj = proj;
    }
  }
  if((r->req() - req_c) == 1) {
    return NULL;
  }
  Node* predicate_c = NULL;
  Node* predicate_x = NULL;
  bool counted_loop = r->is_CountedLoop();
  Node *region_c = new (igvn->C) RegionNode(req_c + 1);
  Node *phi_c    = con1;
  uint  len      = r->req();
  Node *region_x = new (igvn->C) RegionNode(len - req_c);
  Node *phi_x    = PhiNode::make_blank(region_x, phi);
  for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) {
    if (phi->in(i) == con1) {
      region_c->init_req( i_c++, r  ->in(i) );
      if (r->in(i) == predicate_proj)
        predicate_c = predicate_proj;
    } else {
      region_x->init_req( i_x,   r  ->in(i) );
      phi_x   ->init_req( i_x++, phi->in(i) );
      if (r->in(i) == predicate_proj)
        predicate_x = predicate_proj;
    }
  }
  if (predicate_c != NULL && (req_c > 1)) {
    assert(predicate_x == NULL, "only one predicate entry expected");
    predicate_c = NULL; // Do not clone predicate below merge point
  }
  if (predicate_x != NULL && ((len - req_c) > 2)) {
    assert(predicate_c == NULL, "only one predicate entry expected");
    predicate_x = NULL; // Do not clone predicate below merge point
  }
  igvn->register_new_node_with_optimizer( region_c );
  igvn->register_new_node_with_optimizer( region_x );
  Node *hook = new (igvn->C) Node(4);
  hook->init_req(0, phi_x);
  hook->init_req(1, phi_c);
  phi_x = phase->transform( phi_x );
  Node *cmp_c = phase->makecon(t);
  Node *cmp_x = cmp->clone();
  cmp_x->set_req(1,phi_x);
  cmp_x->set_req(2,con2);
  cmp_x = phase->transform(cmp_x);
  Node *b_c = phase->transform(new (igvn->C) BoolNode(cmp_c,b->_test._test));
  Node *b_x = phase->transform(new (igvn->C) BoolNode(cmp_x,b->_test._test));
  IfNode *iff_c = new (igvn->C) IfNode(region_c,b_c,iff->_prob,iff->_fcnt);
  igvn->set_type_bottom(iff_c);
  igvn->_worklist.push(iff_c);
  hook->init_req(2, iff_c);
  IfNode *iff_x = new (igvn->C) IfNode(region_x,b_x,iff->_prob, iff->_fcnt);
  igvn->set_type_bottom(iff_x);
  igvn->_worklist.push(iff_x);
  hook->init_req(3, iff_x);
  Node *iff_c_t = phase->transform(new (igvn->C) IfTrueNode (iff_c));
  Node *iff_c_f = phase->transform(new (igvn->C) IfFalseNode(iff_c));
  if (predicate_c != NULL) {
    assert(predicate_x == NULL, "only one predicate entry expected");
    iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t, !counted_loop);
    iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f, !counted_loop);
  }
  Node *iff_x_t = phase->transform(new (igvn->C) IfTrueNode (iff_x));
  Node *iff_x_f = phase->transform(new (igvn->C) IfFalseNode(iff_x));
  if (predicate_x != NULL) {
    assert(predicate_c == NULL, "only one predicate entry expected");
    iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t, !counted_loop);
    iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f, !counted_loop);
  }
  Node *region_s = new (igvn->C) RegionNode(3);
  igvn->_worklist.push(region_s);
  region_s->init_req(1, iff_c_t);
  region_s->init_req(2, iff_x_t);
  igvn->register_new_node_with_optimizer( region_s );
  Node *region_f = new (igvn->C) RegionNode(3);
  igvn->_worklist.push(region_f);
  region_f->init_req(1, iff_c_f);
  region_f->init_req(2, iff_x_f);
  igvn->register_new_node_with_optimizer( region_f );
  igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table.
  cmp->set_req(1,NULL);  // Whack the inputs to cmp because it will be dead
  cmp->set_req(2,NULL);
  Node *phi_s = NULL;     // do not construct unless needed
  Node *phi_f = NULL;     // do not construct unless needed
  for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
    Node* v = phi->last_out(i2);// User of the phi
    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
    uint vop = v->Opcode();
    Node *proj = NULL;
    if( vop == Op_Phi ) {       // Remote merge point
      Node *r = v->in(0);
      for (uint i3 = 1; i3 < r->req(); i3++)
        if (r->in(i3) && r->in(i3)->in(0) == iff) {
          proj = r->in(i3);
          break;
        }
    } else if( v->is_ConstraintCast() ) {
      proj = v->in(0);          // Controlling projection
    } else {
      assert( 0, "do not know how to handle this guy" );
    }
    Node *proj_path_data, *proj_path_ctrl;
    if( proj->Opcode() == Op_IfTrue ) {
      if( phi_s == NULL ) {
        phi_s = PhiNode::make_blank(region_s,phi);
        phi_s->init_req( 1, phi_c );
        phi_s->init_req( 2, phi_x );
        hook->add_req(phi_s);
        phi_s = phase->transform(phi_s);
      }
      proj_path_data = phi_s;
      proj_path_ctrl = region_s;
    } else {
      if( phi_f == NULL ) {
        phi_f = PhiNode::make_blank(region_f,phi);
        phi_f->init_req( 1, phi_c );
        phi_f->init_req( 2, phi_x );
        hook->add_req(phi_f);
        phi_f = phase->transform(phi_f);
      }
      proj_path_data = phi_f;
      proj_path_ctrl = region_f;
    }
    if( vop == Op_Phi ) {       // Remote merge point
      uint i;
      for( i = 1; i < v->req(); i++ )
        if( v->in(i) == phi )
          break;
      v->set_req(i, proj_path_data );
    } else if( v->is_ConstraintCast() ) {
      v->set_req(0, proj_path_ctrl );
      v->set_req(1, proj_path_data );
    } else
      ShouldNotReachHere();
  }
  for (DUIterator_Last i3min, i3 = iff->last_outs(i3min); i3 >= i3min; --i3) {
    Node* p = iff->last_out(i3);
    assert( p->Opcode() == Op_IfTrue || p->Opcode() == Op_IfFalse, "" );
    Node *u = (p->Opcode() == Op_IfTrue) ? region_s : region_f;
    igvn->add_users_to_worklist(p);
    for (DUIterator_Last lmin, l = p->last_outs(lmin); l >= lmin;) {
      Node* x = p->last_out(l);
      igvn->hash_delete(x);
      uint uses_found = 0;
      for( uint j = 0; j < x->req(); j++ ) {
        if( x->in(j) == p ) {
          x->set_req(j, u);
          uses_found++;
        }
      }
      l -= uses_found;    // we deleted 1 or more copies of this edge
    }
    igvn->remove_dead_node(p);
  }
  igvn->hash_delete(r);
  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
    Node* u = r->last_out(l);
    if( u == r ) {
      r->set_req(0, NULL);
    } else {
      assert(u->outcnt() == 0, "only dead users");
      igvn->remove_dead_node(u);
    }
    l -= 1;
  }
  igvn->remove_dead_node(r);
  igvn->remove_dead_node( hook );
  return new (igvn->C) ConINode(TypeInt::ZERO);
}
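// Recognize an IF that implements a range check: an unsigned compare of an
// index (plus optional constant offset) against an array LoadRange, guarded
// by an uncommon trap on the out-of-bounds path.  Returns 0 if the pattern
// does not match, otherwise 1 or 2 depending on whether the test had to be
// flipped, and fills in range/index/offset.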
int IfNode::is_range_check(Node* &range, Node* &index, jint &offset) {
  if (outcnt() != 2) {
    return 0;
  }
  Node* b = in(1);
  if (b == NULL || !b->is_Bool())  return 0;
  BoolNode* bn = b->as_Bool();
  Node* cmp = bn->in(1);
  if (cmp == NULL)  return 0;
  if (cmp->Opcode() != Op_CmpU)  return 0;
  Node* l = cmp->in(1);
  Node* r = cmp->in(2);
  int flip_test = 1;
  if (bn->_test._test == BoolTest::le) {
    l = cmp->in(2);
    r = cmp->in(1);
    flip_test = 2;
  } else if (bn->_test._test != BoolTest::lt) {
    return 0;
  }
  if (l->is_top())  return 0;   // Top input means dead test
  if (r->Opcode() != Op_LoadRange)  return 0;
  Node* iftrap = proj_out(flip_test == 2 ? true : false);
  bool found_trap = false;
  if (iftrap != NULL) {
    Node* u = iftrap->unique_ctrl_out();
    if (u != NULL) {
      if (u->is_Region()) {
        Node* c = u->unique_ctrl_out();
        if (c != NULL) {
          iftrap = u;
          u = c;
        }
      }
      if (u->in(0) == iftrap && u->is_CallStaticJava()) {
        int req = u->as_CallStaticJava()->uncommon_trap_request();
        if (Deoptimization::trap_request_reason(req) ==
            Deoptimization::Reason_range_check) {
          found_trap = true;
        }
      }
    }
  }
  if (!found_trap)  return 0;   // sorry, no cigar
  Node* ind = l;
  jint  off = 0;
  if (l->is_top()) {
    return 0;
  } else if (l->Opcode() == Op_AddI) {
    if ((off = l->in(1)->find_int_con(0)) != 0) {
      ind = l->in(2);
    } else if ((off = l->in(2)->find_int_con(0)) != 0) {
      ind = l->in(1);
    }
  } else if ((off = l->find_int_con(-1)) >= 0) {
    ind = NULL;
  } else {
    off = 0;
  }
  index  = ind;
  offset = off;
  range  = r;
  return flip_test;
}
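// Adjust (widen) a dominating range check so it also covers the offset of
// the check about to be removed.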
static void adjust_check(Node* proj, Node* range, Node* index,
                         int flip, jint off_lo, PhaseIterGVN* igvn) {
  PhaseGVN *gvn = igvn;
  Node *iff = proj->in(0);
  Node *bol = iff->in(1);
  if( bol->is_top() ) return;   // In case a partially dead range check appears
  DEBUG_ONLY( if( !bol->is_Bool() ) { proj->dump(3); fatal("Expect projection-->IfNode-->BoolNode"); } )
  if( !bol->is_Bool() ) return;
  Node *cmp = bol->in(1);
  Node *new_add = gvn->intcon(off_lo);
  if( index ) {
    new_add = off_lo ? gvn->transform(new (gvn->C) AddINode( index, new_add )) : index;
  }
  Node *new_cmp = (flip == 1)
    ? new (gvn->C) CmpUNode( new_add, range )
    : new (gvn->C) CmpUNode( range, new_add );
  new_cmp = gvn->transform(new_cmp);
  if( new_cmp == cmp ) return;
  Node *new_bol = gvn->transform( new (gvn->C) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
  igvn->rehash_node_delayed( iff );
  iff->set_req_X( 1, new_bol, igvn );
}
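// Walk up the dominator tree one step.  With linear_only false, also skip
// through Loop heads and around 2-path diamonds, handling a slow-path call
// on either arm.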
Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
  Node *dom = curr->in(0);
  if( !dom )                    // Found a Region degraded to a copy?
    return curr->nonnull_req(); // Skip thru it
  if( curr != dom )             // Normal walk up one step?
    return dom;
  if (linear_only)
    return NULL;
  if( dom->is_Root() )
    return NULL;
  if( dom->is_Loop() )
    return dom->in(1);          // Skip up thru loops
  Node *din1, *din2, *din3, *din4;
  if( dom->req() == 3 &&        // 2-path merge point
      (din1 = dom ->in(1)) &&   // Left  path exists
      (din2 = dom ->in(2)) &&   // Right path exists
      (din3 = din1->in(0)) &&   // Left  path up one
      (din4 = din2->in(0)) ) {  // Right path up one
    if( din3->is_Call() &&      // Handle a slow-path call on either arm
        (din3 = din3->in(0)) )
      din3 = din3->in(0);
    if( din4->is_Call() &&      // Handle a slow-path call on either arm
        (din4 = din4->in(0)) )
      din4 = din4->in(0);
    if (din3 != NULL && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
      return din3;              // Skip around diamonds
  }
  return NULL;                  // Dead loop?  Or hit root?
}
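// Return the narrowed integer type of val implied by taking the given IF
// projection, or NULL if the test tells us nothing useful about val.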
const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj) {
  assert(if_proj &&
         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
    IfNode* iff = if_proj->in(0)->as_If();
    if (iff->in(1) && iff->in(1)->is_Bool()) {
      BoolNode* bol = iff->in(1)->as_Bool();
      if (bol->in(1) && bol->in(1)->is_Cmp()) {
        const CmpNode* cmp  = bol->in(1)->as_Cmp();
        if (cmp->in(1) == val) {
          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
          if (cmp2_t != NULL) {
            jint lo = cmp2_t->_lo;
            jint hi = cmp2_t->_hi;
            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
            switch (msk) {
            case BoolTest::ne:
              return NULL;
            case BoolTest::eq:
              return cmp2_t;
            case BoolTest::lt:
              lo = TypeInt::INT->_lo;
              if (hi - 1 < hi) {
                hi = hi - 1;
              }
              break;
            case BoolTest::le:
              lo = TypeInt::INT->_lo;
              break;
            case BoolTest::gt:
              if (lo + 1 > lo) {
                lo = lo + 1;
              }
              hi = TypeInt::INT->_hi;
              break;
            case BoolTest::ge:
              hi = TypeInt::INT->_hi;
              break;
            }
            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
            return rtn_t;
          }
        }
      }
    }
  }
  return NULL;
}
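// Fold two integer compares of the same value against constants into a
// single unsigned range check, e.g. a dominated "n < hi" test under an
// "n >= lo" test becomes "(n - lo) u< bound" when the failing paths merge
// at a common region.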
Node* IfNode::fold_compares(PhaseGVN* phase) {
  if (Opcode() != Op_If) return NULL;
  Node* this_cmp = in(1)->in(1);
  if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI &&
      this_cmp->in(2)->is_Con() && this_cmp->in(2) != phase->C->top()) {
    Node* ctrl = in(0);
    BoolNode* this_bool = in(1)->as_Bool();
    Node* n = this_cmp->in(1);
    int hi = this_cmp->in(2)->get_int();
    if (ctrl != NULL && ctrl->is_Proj() && ctrl->outcnt() == 1 &&
        ctrl->in(0)->is_If() &&
        ctrl->in(0)->outcnt() == 2 &&
        ctrl->in(0)->in(1)->is_Bool() &&
        ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
        ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
        ctrl->in(0)->in(1)->in(1)->in(1) == n) {
      IfNode* dom_iff = ctrl->in(0)->as_If();
      Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
      if (otherproj->outcnt() == 1 && otherproj->unique_out()->is_Region() &&
          this_bool->_test._test != BoolTest::ne && this_bool->_test._test != BoolTest::eq) {
        RegionNode* region = otherproj->unique_out()->as_Region();
        Node* success = NULL;
        Node* fail = NULL;
        for (int i = 0; i < 2; i++) {
          Node* proj = proj_out(i);
          if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) {
            success = proj;
          } else if (fail == NULL) {
            fail = proj;
          } else {
            success = fail = NULL;
          }
        }
        if (success != NULL && fail != NULL && !region->has_phi()) {
          int lo = dom_iff->in(1)->in(1)->in(2)->get_int();
          BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
          Node* dom_cmp =  dom_bool->in(1);
          const TypeInt* failtype  = filtered_int_type(phase, n, ctrl);
          if (failtype != NULL) {
            const TypeInt* type2 = filtered_int_type(phase, n, fail);
            if (type2 != NULL) {
              failtype = failtype->join(type2)->is_int();
            } else {
              failtype = NULL;
            }
          }
          if (failtype != NULL &&
              dom_bool->_test._test != BoolTest::ne && dom_bool->_test._test != BoolTest::eq) {
            int bound = failtype->_hi - failtype->_lo + 1;
            if (failtype->_hi != max_jint && failtype->_lo != min_jint && bound > 1) {
              BoolTest::mask cond = fail->as_Proj()->_con ? BoolTest::lt : BoolTest::ge;
              Node* adjusted = phase->transform(new (phase->C) SubINode(n, phase->intcon(failtype->_lo)));
              Node* newcmp = phase->transform(new (phase->C) CmpUNode(adjusted, phase->intcon(bound)));
              Node* newbool = phase->transform(new (phase->C) BoolNode(newcmp, cond));
              phase->is_IterGVN()->replace_input_of(dom_iff, 1, phase->intcon(ctrl->as_Proj()->_con));
              phase->hash_delete(this);
              set_req(1, newbool);
              return this;
            }
            if (failtype->_lo > failtype->_hi) {
              phase->hash_delete(this);
              set_req(1, phase->intcon(success->as_Proj()->_con));
              return this;
            }
          }
        }
      }
    }
  }
  return NULL;
}
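// Check for a useless boolean: an IF testing a Phi that merges only the
// constants 0 and 1 out of a diamond.  Replace the test with the (possibly
// negated) condition that controlled the diamond.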
static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) {
  Node *i1 = iff->in(1);
  if( !i1->is_Bool() ) return NULL;
  BoolNode *bol = i1->as_Bool();
  Node *cmp = bol->in(1);
  if( cmp->Opcode() != Op_CmpI ) return NULL;
  const Type *cmp2_t = phase->type( cmp->in(2) );
  if( cmp2_t != TypeInt::ZERO &&
      cmp2_t != TypeInt::ONE )
    return NULL;
  i1 = cmp->in(1);
  if( !i1->is_Phi() ) return NULL;
  PhiNode *phi = i1->as_Phi();
  if( phase->type( phi ) != TypeInt::BOOL )
    return NULL;
  int true_path = phi->is_diamond_phi();
  if( true_path == 0 ) return NULL;
  if (phi->in(0)->in(1)->in(0) == iff) return NULL;
  BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
  int flip = 0;
  if( bol->_test._test == BoolTest::ne ) flip = 1-flip;
  else if( bol->_test._test != BoolTest::eq ) return NULL;
  if( cmp2_t == TypeInt::ZERO ) flip = 1-flip;
  const Type *phi1_t = phase->type( phi->in(1) );
  const Type *phi2_t = phase->type( phi->in(2) );
  if( phi1_t == TypeInt::ZERO ) {
    if( phi2_t != TypeInt::ONE ) return NULL;
    flip = 1-flip;
  } else {
    if( phi1_t != TypeInt::ONE  ) return NULL;
    if( phi2_t != TypeInt::ZERO ) return NULL;
  }
  if( true_path == 2 ) {
    flip = 1-flip;
  }
  Node* new_bol = (flip ? phase->transform( bol2->negate(phase) ) : bol2);
  assert(new_bol != iff->in(1), "must make progress");
  iff->set_req(1, new_bol);
  phase->C->set_major_progress();
  return iff;
}
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
struct RangeCheck {
  Node* ctl;
  jint off;
};
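// Return a node which is more "ideal" than the current node.  Covers test
// canonicalization, split-if, useless-bool removal, range-check smearing,
// and elimination of tests dominated by an equivalent test.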
Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  if (!can_reshape)  return NULL;
  PhaseIterGVN *igvn = phase->is_IterGVN();
  if (in(0)->is_top())  return NULL;
  if (in(1)->is_top())  return NULL;
  if (in(1)->is_Con())  return NULL;
  if (outcnt() < 2)  return NULL;
  Node* idt_if = idealize_test(phase, this);
  if (idt_if != NULL)  return idt_if;
  Node *s = split_if(this, igvn);
  if (s != NULL)  return s;
  Node *bol2 = remove_useless_bool(this, phase);
  if( bol2 ) return bol2;
  Node *dom = in(0);
  Node *prev_dom = this;
  Node *index1, *range1;
  jint offset1;
  int flip1 = is_range_check(range1, index1, offset1);
  if( flip1 ) {
    const int NRC = 3;
    RangeCheck prev_checks[NRC];
    int nb_checks = 0;
    jint off_lo = offset1;
    jint off_hi = offset1;
    bool found_immediate_dominator = false;
    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
      if (dom->Opcode() == Op_If &&  // Not same opcode?
          prev_dom->in(0) == dom) { // One path of test does dominate?
        if (dom == this) return NULL; // dead loop
        Node *index2, *range2;
        jint offset2;
        int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
            dom->outcnt() == 2) {
          if (nb_checks == 0 && dom->in(1) == in(1)) {
            assert(offset2 == offset1, "Same test but different offsets");
            found_immediate_dominator = true;
            break;
          }
          off_lo = MIN2(off_lo,offset2);
          off_hi = MAX2(off_hi,offset2);
          prev_checks[nb_checks%NRC].ctl = prev_dom;
          prev_checks[nb_checks%NRC].off = offset2;
          nb_checks++;
        }
      }
      prev_dom = dom;
      dom = up_one_dom(dom);
      if (!dom) break;
    }
    if (!found_immediate_dominator) {
      if (!phase->C->allow_range_check_smearing())  return NULL;
      if (nb_checks == 0) {
        return NULL;
      }
      int chk0 = (nb_checks - 1) % NRC;
      if (index1) {
        if (nb_checks == 1) {
          return NULL;
        } else {
          RangeCheck rc0 = prev_checks[chk0];
          int chk1 = (nb_checks - 2) % NRC;
          RangeCheck rc1 = prev_checks[chk1];
          if (rc0.off == off_lo) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
            prev_dom = rc1.ctl;
          } else if (rc0.off == off_hi) {
            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
            prev_dom = rc1.ctl;
          } else {
            if (nb_checks == 2) {
              return NULL;
            }
            int chk2 = (nb_checks - 3) % NRC;
            RangeCheck rc2 = prev_checks[chk2];
            if (rc1.off <= rc0.off) {
              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
            } else {
              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
            }
            prev_dom = rc2.ctl;
          }
        }
      } else {
        RangeCheck rc0 = prev_checks[chk0];
        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
        prev_dom = rc0.ctl;
      }
    }
  } else {                      // Scan for an equivalent test
    Node *cmp;
    int dist = 0;               // Cutoff limit for search
    int op = Opcode();
    if( op == Op_If &&
        (cmp=in(1)->in(1))->Opcode() == Op_CmpP ) {
      if( cmp->in(2) != NULL && // make sure cmp is not already dead
          cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
        dist = 64;              // Limit for null-pointer scans
      } else {
        dist = 4;               // Do not bother for random pointer tests
      }
    } else {
      dist = 4;                 // Limit for random junky scans
    }
    if( !dom ) return NULL;     // Dead loop?
    Node* result = fold_compares(phase);
    if (result != NULL) {
      return result;
    }
    while( dom->Opcode() != op    ||  // Not same opcode?
           dom->in(1)    != in(1) ||  // Not same input 1?
           (req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
           prev_dom->in(0) != dom ) { // One path of test does not dominate?
      if( dist < 0 ) return NULL;
      dist--;
      prev_dom = dom;
      dom = up_one_dom( dom );
      if( !dom ) return NULL;
    }
    if( this == dom )
      return NULL;
    if( dist > 2 )              // Add to count of NULL checks elided
      explicit_null_checks_elided++;
  } // End of Else scan for an equivalent test
#ifndef PRODUCT
  if( TraceIterativeGVN ) {
    tty->print("   Removing IfNode: "); this->dump();
  }
  if( VerifyOpto && !phase->allow_progress() ) {
    return NULL;
  }
#endif
  dominated_by( prev_dom, igvn );
  return new (phase->C) ConINode(TypeInt::ZERO);
}
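// This IF is dominated by the equivalent test prev_dom: route each
// projection's users either up to the matching dominating projection or to
// the old control (top for the dead arm), then delete the IF.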
void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top(); // Shortcut to top
  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
  if ((unc_proj != NULL) && (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))) {
    prev_dom = idom;
  }
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);     // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    Node *ctrl_target = (ifp->Opcode() == prev_op) ?     idom : top;
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if( !s->depends_only_on_test() ) {
        uint l;
        for( l = 0; s->in(l) != ifp; l++ ) { }
        igvn->replace_input_of(s, l, ctrl_target);
      } else {                      // Else, for control producers,
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
      }
    } // End for each child of a projection
    igvn->remove_dead_node(ifp);
  } // End for each IfTrue/IfFalse child of If
  igvn->remove_dead_node(this);
}
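// An IfTrue projection whose IF can never take the true arm is just a copy
// of the IF's control input.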
Node *IfTrueNode::Identity( PhaseTransform *phase ) {
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  return ( t == TypeTuple::IFNEITHER || t == TypeTuple::IFTRUE )
    ? in(0)->in(0)              // IfNode control
    : this;                     // no progress
}
#ifndef PRODUCT
void IfNode::dump_spec(outputStream *st) const {
  st->print("P=%f, C=%f",_prob,_fcnt);
}
#endif
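// Canonicalize the test: if the BoolTest is not in canonical form, build
// the negated Bool and a replacement IF, then swap the sense of the two
// projections so the graph is semantically unchanged.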
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != NULL, "If must be live");
  if (iff->outcnt() != 2)  return NULL; // Malformed projections.
  Node* old_if_f = iff->proj_out(false);
  Node* old_if_t = iff->proj_out(true);
  if (iff->is_CountedLoopEnd())  return NULL;
  if (!iff->in(1)->is_Bool())  return NULL; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  if( bt.is_canonical() )
    return NULL;
  Node* new_b = phase->transform( new (phase->C) BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return NULL;
  b = new_b->as_Bool();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );
  iff = new (phase->C) IfNode( iff->in(0), b, 1.0-iff->_prob, iff->_fcnt);
  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff);
    iff = (IfNode*)prior;
  } else {
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);
  Node* new_if_f = (Node*)(new (phase->C) IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new (phase->C) IfTrueNode ( iff ));
  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);
  return iff;
}
Node *IfFalseNode::Identity( PhaseTransform *phase ) {
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  return ( t == TypeTuple::IFNEITHER || t == TypeTuple::IFFALSE )
    ? in(0)->in(0)              // IfNode control
    : this;                     // no progress
}
C:\hotspot-69087d08d473\src\share\vm/opto/indexSet.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/indexSet.hpp"
#include "opto/regmask.hpp"
IndexSet::BitBlock  IndexSet::_empty_block     = IndexSet::BitBlock();
#ifdef ASSERT
julong IndexSet::_alloc_new = 0;
julong IndexSet::_alloc_total = 0;
julong IndexSet::_total_bits = 0;
julong IndexSet::_total_used_blocks = 0;
julong IndexSet::_total_unused_blocks = 0;
int IndexSet::_serial_count = 1;
#endif
const byte IndexSetIterator::_first_bit[32] = {
  0, 0, 1, 0,
  2, 0, 1, 0,
  3, 0, 1, 0,
  2, 0, 1, 0,
  4, 0, 1, 0,
  2, 0, 1, 0,
  3, 0, 1, 0,
  2, 0, 1, 0
};
const byte IndexSetIterator::_second_bit[32] = {
  5, 5, 5, 1,
  5, 2, 2, 1,
  5, 3, 3, 1,
  3, 2, 2, 1,
  5, 4, 4, 1,
  4, 2, 2, 1,
  4, 3, 3, 1,
  3, 2, 2, 1
};
/* Equivalent lookup tables for a window_size of 8 (256 entries), disabled:
const byte IndexSetIterator::_first_bit[256] = {
  8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
  4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};

const byte IndexSetIterator::_second_bit[256] = {
  8, 8, 8, 1, 8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1,
  8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  8, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1,
  7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
  6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
  6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
  5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1
};*/
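// Add a chunk of freshly allocated, 32-byte aligned BitBlocks to the
// per-compile free list.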
void IndexSet::populate_free_list() {
  Compile *compile = Compile::current();
  BitBlock *free = (BitBlock*)compile->indexSet_free_block_list();
  char *mem = (char*)arena()->Amalloc_4(sizeof(BitBlock) *
                                        bitblock_alloc_chunk_size + 32);
  BitBlock *new_blocks = (BitBlock*)(((uintptr_t)mem + 32) & ~0x001F);
  for (int i = 0; i < bitblock_alloc_chunk_size; i++) {
    new_blocks->set_next(free);
    free = new_blocks;
    new_blocks++;
  }
  compile->set_indexSet_free_block_list(free);
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    inc_stat_counter(&_alloc_new, bitblock_alloc_chunk_size);
  }
#endif
}
IndexSet::BitBlock *IndexSet::alloc_block() {
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    inc_stat_counter(&_alloc_total, 1);
  }
#endif
  Compile *compile = Compile::current();
  BitBlock* free_list = (BitBlock*)compile->indexSet_free_block_list();
  if (free_list == NULL) {
    populate_free_list();
    free_list = (BitBlock*)compile->indexSet_free_block_list();
  }
  BitBlock *block = free_list;
  compile->set_indexSet_free_block_list(block->next());
  block->clear();
  return block;
}
IndexSet::BitBlock *IndexSet::alloc_block_containing(uint element) {
  BitBlock *block = alloc_block();
  uint bi = get_block_index(element);
  _blocks[bi] = block;
  return block;
}
void IndexSet::free_block(uint i) {
  debug_only(check_watch("free block", i));
  assert(i < _max_blocks, "block index too large");
  BitBlock *block = _blocks[i];
  assert(block != &_empty_block, "cannot free the empty block");
  block->set_next((IndexSet::BitBlock*)Compile::current()->indexSet_free_block_list());
  Compile::current()->set_indexSet_free_block_list(block);
  set_block(i,&_empty_block);
}
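// Union the interference sets of live ranges lr1 and lr2 into this set,
// restricted to neighbors whose register mask overlaps 'mask', while
// accumulating the effective interference degree.  Bails out early once
// the degree reaches fail_degree.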
uint IndexSet::lrg_union(uint lr1, uint lr2,
                         const uint fail_degree,
                         const PhaseIFG *ifg,
                         const RegMask &mask ) {
  IndexSet *one = ifg->neighbors(lr1);
  IndexSet *two = ifg->neighbors(lr2);
  LRG &lrg1 = ifg->lrgs(lr1);
  LRG &lrg2 = ifg->lrgs(lr2);
#ifdef ASSERT
  assert(_max_elements == one->_max_elements, "max element mismatch");
  check_watch("union destination");
  one->check_watch("union source");
  two->check_watch("union source");
#endif
  if (two->count() > one->count()) {
    IndexSet *temp = one;
    one = two;
    two = temp;
  }
  clear();
  uint reg_degree = 0;
  uint element;
  IndexSetIterator elements(one);
  while ((element = elements.next()) != 0) {
    LRG &lrg = ifg->lrgs(element);
    if (mask.overlap(lrg.mask())) {
      insert(element);
      if( !lrg.mask().is_AllStack() ) {
        reg_degree += lrg1.compute_degree(lrg);
        if( reg_degree >= fail_degree ) return reg_degree;
      } else {
        assert( lrg.lo_degree(), "" );
      }
    }
  }
  IndexSetIterator elements2(two);
  while ((element = elements2.next()) != 0) {
    LRG &lrg = ifg->lrgs(element);
    if (mask.overlap(lrg.mask())) {
      if (insert(element)) {
        if( !lrg.mask().is_AllStack() ) {
          reg_degree += lrg2.compute_degree(lrg);
          if( reg_degree >= fail_degree ) return reg_degree;
        } else {
          assert( lrg.lo_degree(), "" );
        }
      }
    }
  }
  return reg_degree;
}
IndexSet::IndexSet (IndexSet *set) {
#ifdef ASSERT
  _serial_number = _serial_count++;
  set->check_watch("copied", _serial_number);
  check_watch("initialized by copy", set->_serial_number);
  _max_elements = set->_max_elements;
#endif
  _count = set->_count;
  _max_blocks = set->_max_blocks;
  if (_max_blocks <= preallocated_block_list_size) {
    _blocks = _preallocated_block_list;
  } else {
    _blocks =
      (IndexSet::BitBlock**) arena()->Amalloc_4(sizeof(IndexSet::BitBlock**) * _max_blocks);
  }
  for (uint i = 0; i < _max_blocks; i++) {
    BitBlock *block = set->_blocks[i];
    if (block == &_empty_block) {
      set_block(i, &_empty_block);
    } else {
      BitBlock *new_block = alloc_block();
      memcpy(new_block->words(), block->words(), sizeof(uint32) * words_per_block);
      set_block(i, new_block);
    }
  }
}
void IndexSet::initialize(uint max_elements) {
#ifdef ASSERT
  _serial_number = _serial_count++;
  check_watch("initialized", max_elements);
  _max_elements = max_elements;
#endif
  _count = 0;
  _max_blocks = (max_elements + bits_per_block - 1) / bits_per_block;
  if (_max_blocks <= preallocated_block_list_size) {
    _blocks = _preallocated_block_list;
  } else {
    _blocks = (IndexSet::BitBlock**) arena()->Amalloc_4(sizeof(IndexSet::BitBlock**) * _max_blocks);
  }
  for (uint i = 0; i < _max_blocks; i++) {
    set_block(i, &_empty_block);
  }
}
void IndexSet::initialize(uint max_elements, Arena *arena) {
#ifdef ASSERT
  _serial_number = _serial_count++;
  check_watch("initialized2", max_elements);
  _max_elements = max_elements;
#endif // ASSERT
  _count = 0;
  _max_blocks = (max_elements + bits_per_block - 1) / bits_per_block;
  if (_max_blocks <= preallocated_block_list_size) {
    _blocks = _preallocated_block_list;
  } else {
    _blocks = (IndexSet::BitBlock**) arena->Amalloc_4(sizeof(IndexSet::BitBlock**) * _max_blocks);
  }
  for (uint i = 0; i < _max_blocks; i++) {
    set_block(i, &_empty_block);
  }
}
void IndexSet::swap(IndexSet *set) {
#ifdef ASSERT
  assert(_max_elements == set->_max_elements, "must have same universe size to swap");
  check_watch("swap", set->_serial_number);
  set->check_watch("swap", _serial_number);
#endif
  for (uint i = 0; i < _max_blocks; i++) {
    BitBlock *temp = _blocks[i];
    set_block(i, set->_blocks[i]);
    set->set_block(i, temp);
  }
  uint temp = _count;
  _count = set->_count;
  set->_count = temp;
}
#ifndef PRODUCT
void IndexSet::dump() const {
  IndexSetIterator elements(this);
  tty->print("{");
  uint i;
  while ((i = elements.next()) != 0) {
    tty->print("L%d ", i);
  }
  tty->print_cr("}");
}
#endif
#ifdef ASSERT
void IndexSet::tally_iteration_statistics() const {
  inc_stat_counter(&_total_bits, count());
  for (uint i = 0; i < _max_blocks; i++) {
    if (_blocks[i] != &_empty_block) {
      inc_stat_counter(&_total_used_blocks, 1);
    } else {
      inc_stat_counter(&_total_unused_blocks, 1);
    }
  }
}
void IndexSet::print_statistics() {
  julong total_blocks = _total_used_blocks + _total_unused_blocks;
  tty->print_cr ("Accumulated IndexSet usage statistics:");
  tty->print_cr ("--------------------------------------");
  tty->print_cr ("  Iteration:");
  tty->print_cr ("    blocks visited: " UINT64_FORMAT, total_blocks);
  tty->print_cr ("    blocks empty: %4.2f%%", 100.0*(double)_total_unused_blocks/total_blocks);
  tty->print_cr ("    bit density (bits/used blocks): %4.2f", (double)_total_bits/_total_used_blocks);
  tty->print_cr ("    bit density (bits/all blocks): %4.2f", (double)_total_bits/total_blocks);
  tty->print_cr ("  Allocation:");
  tty->print_cr ("    blocks allocated: " UINT64_FORMAT, _alloc_new);
  tty->print_cr ("    blocks used/reused: " UINT64_FORMAT, _alloc_total);
}
void IndexSet::verify() const {
  assert(!member(0), "zero cannot be a member");
  uint count = 0;
  uint i;
  for (i = 1; i < _max_elements; i++) {
    if (member(i)) {
      count++;
      assert(count <= _count, "_count is messed up");
    }
  }
  IndexSetIterator elements(this);
  count = 0;
  while ((i = elements.next()) != 0) {
    count++;
    assert(member(i), "returned a non member");
    assert(count <= _count, "iterator returned wrong number of elements");
  }
}
#endif
IndexSetIterator::IndexSetIterator(IndexSet *set) {
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    set->tally_iteration_statistics();
  }
  set->check_watch("traversed", set->count());
#endif
  if (set->is_empty()) {
    _current = 0;
    _next_word = IndexSet::words_per_block;
    _next_block = 1;
    _max_blocks = 1;
  } else {
    _current = 0;
    _value = 0;
    _next_block = 0;
    _next_word = IndexSet::words_per_block;
    _max_blocks = set->_max_blocks;
    _words = NULL;
    _blocks = set->_blocks;
    _set = set;
  }
}
IndexSetIterator::IndexSetIterator(const IndexSet *set) {
#ifdef ASSERT
  if (CollectIndexSetStatistics) {
    set->tally_iteration_statistics();
  }
#endif
  if (set->is_empty()) {
    _current = 0;
    _next_word = IndexSet::words_per_block;
    _next_block = 1;
    _max_blocks = 1;
  } else {
    _current = 0;
    _value = 0;
    _next_block = 0;
    _next_word = IndexSet::words_per_block;
    _max_blocks = set->_max_blocks;
    _words = NULL;
    _blocks = set->_blocks;
    _set = NULL;
  }
}
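// Slow path of next(): scan forward for the next non-empty word, then the
// next non-empty block.  Blocks found empty during the scan are returned to
// the free list when iterating over a mutable set (_set != NULL).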
uint IndexSetIterator::advance_and_next() {
  for (uint wi = _next_word; wi < (unsigned)IndexSet::words_per_block; wi++) {
    if (_words[wi] != 0) {
      _value = ((_next_block - 1) * IndexSet::bits_per_block) + (wi * IndexSet::bits_per_word);
      _current = _words[wi];
      _next_word = wi+1;
      return next();
    }
  }
  for (uint bi = _next_block; bi < _max_blocks; bi++) {
    if (_blocks[bi] != &IndexSet::_empty_block) {
      _words = _blocks[bi]->words();
      for (uint wi = 0; wi < (unsigned)IndexSet::words_per_block; wi++) {
        if (_words[wi] != 0) {
          _value = (bi * IndexSet::bits_per_block) + (wi * IndexSet::bits_per_word);
          _current = _words[wi];
          _next_block = bi+1;
          _next_word = wi+1;
          return next();
        }
      }
      if (_set) {
        _set->free_block(bi);
      }
    }
  }
  _next_block = _max_blocks;
  _next_word = IndexSet::words_per_block;
  return 0;
}
C:\hotspot-69087d08d473\src\share\vm/opto/indexSet.hpp
#ifndef SHARE_VM_OPTO_INDEXSET_HPP
#define SHARE_VM_OPTO_INDEXSET_HPP
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/compile.hpp"
#include "opto/regmask.hpp"
class IndexSet : public ResourceObj {
 friend class IndexSetIterator;
 public:
  enum { preallocated_block_list_size = 16 };
  enum { bit_index_length = 5,
         word_index_length = 3,
         block_index_length = 8 // not used
  };
  enum {
         bit_index_offset = 0, // not used
         word_index_offset = bit_index_length,
         block_index_offset = bit_index_length + word_index_length,
         bits_per_word = 1 << bit_index_length,
         words_per_block = 1 << word_index_length,
         bits_per_block = bits_per_word * words_per_block,
         bit_index_mask = right_n_bits(bit_index_length),
         word_index_mask = right_n_bits(word_index_length)
  };
  static uint get_block_index(uint element) {
    return element >> block_index_offset;
  }
  static uint get_word_index(uint element) {
    return mask_bits(element >> word_index_offset,word_index_mask);
  }
  static uint get_bit_index(uint element) {
    return mask_bits(element,bit_index_mask);
  }
  class BitBlock : public ResourceObj {
   friend class IndexSetIterator;
   friend class IndexSet;
   private:
    union {
      uint32 _words[words_per_block];
      BitBlock *_next;
    } _data;
    uint32 *words() { return _data._words; }
    void set_next(BitBlock *next) { _data._next = next; }
    BitBlock *next() { return _data._next; }
    void clear() {
      memset(words(), 0, sizeof(uint32) * words_per_block);
    }
    bool member(uint element) {
      uint word_index = IndexSet::get_word_index(element);
      uint bit_index = IndexSet::get_bit_index(element);
      return ((words()[word_index] & (uint32)(0x1 << bit_index)) != 0);
    }
    bool insert(uint element) {
      uint word_index = IndexSet::get_word_index(element);
      uint bit_index = IndexSet::get_bit_index(element);
      uint32 bit = (0x1 << bit_index);
      uint32 before = words()[word_index];
      words()[word_index] = before | bit;
      return ((before & bit) != 0);
    }
    bool remove(uint element) {
      uint word_index = IndexSet::get_word_index(element);
      uint bit_index = IndexSet::get_bit_index(element);
      uint32 bit = (0x1 << bit_index);
      uint32 before = words()[word_index];
      words()[word_index] = before & ~bit;
      return ((before & bit) != 0);
    }
  };
 private:
  enum { bitblock_alloc_chunk_size = 50 };
  static Arena *arena() { return Compile::current()->indexSet_arena(); }
  static void populate_free_list();
 public:
  static void reset_memory(Compile* compile, Arena *arena) {
    compile->set_indexSet_free_block_list(NULL);
    compile->set_indexSet_arena(arena);
    _empty_block.clear();
  }
 private:
  friend class BitBlock;
  static BitBlock _empty_block;
  uint      _count;
  BitBlock **_blocks;
  BitBlock  *_preallocated_block_list[preallocated_block_list_size];
  uint       _max_blocks;
#ifdef ASSERT
  uint       _max_elements;
#endif
  IndexSet *_next;
 public:
  IndexSet *next() {
#ifdef ASSERT
    if( VerifyOpto ) {
      check_watch("removed from free list?", ((_next == NULL) ? 0 : _next->_serial_number));
    }
#endif
    return _next;
  }
  void set_next(IndexSet *next) {
#ifdef ASSERT
    if( VerifyOpto ) {
      check_watch("put on free list?", ((next == NULL) ? 0 : next->_serial_number));
    }
#endif
    _next = next;
  }
 private:
  BitBlock *get_block_containing(uint element) const {
    assert(element < _max_elements, "element out of bounds");
    return _blocks[get_block_index(element)];
  }
  void set_block(uint index, BitBlock *block) {
#ifdef ASSERT
    if( VerifyOpto )
      check_watch("set block", index);
#endif
    _blocks[index] = block;
  }
  BitBlock *alloc_block();
  BitBlock *alloc_block_containing(uint element);
  void free_block(uint i);
 public:
  void clear() {
#ifdef ASSERT
    if( VerifyOpto )
      check_watch("clear");
#endif
    _count = 0;
    for (uint i = 0; i < _max_blocks; i++) {
      BitBlock *block = _blocks[i];
      if (block != &_empty_block) {
        free_block(i);
      }
    }
  }
  uint count() const { return _count; }
  bool is_empty() const { return _count == 0; }
  bool member(uint element) const {
    return get_block_containing(element)->member(element);
  }
  bool insert(uint element) {
#ifdef ASSERT
    if( VerifyOpto )
      check_watch("insert", element);
#endif
    if (element == 0) {
      return false;
    }
    BitBlock *block = get_block_containing(element);
    if (block == &_empty_block) {
      block = alloc_block_containing(element);
    }
    bool present = block->insert(element);
    if (!present) {
      _count++;
    }
    return !present;
  }
  bool remove(uint element) {
#ifdef ASSERT
    if( VerifyOpto )
      check_watch("remove", element);
#endif
    BitBlock *block = get_block_containing(element);
    bool present = block->remove(element);
    if (present) {
      _count--;
    }
    return present;
  }
  uint lrg_union(uint lr1, uint lr2,
                 const uint fail_degree,
                 const class PhaseIFG *ifg,
                 const RegMask &mask);
  IndexSet() {}
  IndexSet(IndexSet *set);
  void initialize(uint max_element);
  void initialize(uint max_element, Arena *arena);
  void swap(IndexSet *set);
#ifndef PRODUCT
  void dump() const;
#endif
#ifdef ASSERT
  void tally_iteration_statistics() const;
  static julong _alloc_new;
  static julong _alloc_total;
  static julong _total_bits;
  static julong _total_used_blocks;
  static julong _total_unused_blocks;
  void verify() const;
  static int _serial_count;
  int        _serial_number;
  void check_watch(const char *operation, uint operand) const {
    if (IndexSetWatch != 0) {
      if (IndexSetWatch == -1 || _serial_number == IndexSetWatch) {
        tty->print_cr("IndexSet %d : %s ( %d )", _serial_number, operation, operand);
      }
    }
  }
  void check_watch(const char *operation) const {
    if (IndexSetWatch != 0) {
      if (IndexSetWatch == -1 || _serial_number == IndexSetWatch) {
        tty->print_cr("IndexSet %d : %s", _serial_number, operation);
      }
    }
  }
 public:
  static void print_statistics();
#endif
};
class IndexSetIterator VALUE_OBJ_CLASS_SPEC {
 friend class IndexSet;
 public:
  enum { window_size = 5,
         window_mask = right_n_bits(window_size),
         table_size  = (1 << window_size) };
  static const byte _first_bit[table_size];
  static const byte _second_bit[table_size];
 private:
  uint32                _current;
  uint                  _value;
  uint                  _next_word;
  uint32               *_words;
  uint                  _next_block;
  IndexSet::BitBlock **_blocks;
  uint                  _max_blocks;
  IndexSet            *_set;
  uint advance_and_next();
 public:
  IndexSetIterator(IndexSet *set);
  IndexSetIterator(const IndexSet *set);
  uint next() {
    uint current = _current;
    if (current != 0) {
      uint value = _value;
      while (mask_bits(current,window_mask) == 0) {
        current >>= window_size;
        value += window_size;
      }
      uint advance = _second_bit[mask_bits(current,window_mask)];
      _current = current >> advance;
      _value = value + advance;
      return value + _first_bit[mask_bits(current,window_mask)];
    } else {
      return advance_and_next();
    }
  }
};
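// Typical iteration pattern, as used by IndexSet::lrg_union:
//   IndexSetIterator elements(set);
//   uint element;
//   while ((element = elements.next()) != 0) { ... }
// Element 0 is reserved as the end-of-iteration sentinel, which is why
// IndexSet::insert() refuses to insert it.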
#endif // SHARE_VM_OPTO_INDEXSET_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/lcm.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
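// Does this value represent an access that could touch the protected zone
// at the compressed-oop heap base (a DecodeN of a provably not-null narrow
// oop)?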
static bool accesses_heap_base_zone(Node *val) {
  if (Universe::narrow_oop_base() != NULL) { // Implies UseCompressedOops.
    if (val && val->is_Mach()) {
      if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
        if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
          return true;
        }
      }
      NOT_AIX(Unimplemented());
    }
  }
  return false;
}
static bool needs_explicit_null_check_for_read(Node *val) {
  if (os::zero_page_read_protected()) {
    return false;  // Implicit null check will work.
  }
  if (accesses_heap_base_zone(val) &&                    // Hits the base zone page.
      Universe::narrow_oop_use_implicit_null_checks()) { // Base zone page is protected.
    return false;
  }
  return true;
}
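// Detect implicit-null-check opportunities: find an explicit null test
// whose failing path ends in an uncommon trap, then hoist a suitable memory
// op that uses the tested value so the hardware fault performs the check
// instead.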
void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
  float f = block->end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;
  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (block->get_node(block->number_of_nodes()-1) == proj) {
    null_block     = block->_succs[0];
    not_null_block = block->_succs[1];
  } else {
    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
    not_null_block = block->_succs[0];
    null_block     = block->_succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block     = null_block->_succs[0];
  }
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
      Node* nn = null_block->get_node(i1);
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      return;
    }
  }
  bool is_decoden = ((intptr_t)val) & 1;
  val = (Node*)(((intptr_t)val) & ~1);
  assert(!is_decoden || ((val->in(0) == NULL) && val->is_Mach() &&
         (val->as_Mach()->ideal_Opcode() == Op_DecodeN)), "sanity");
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    int iop = mach->ideal_Opcode();
    switch( iop ) {
    case Op_LoadB:
    case Op_LoadUB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      assert(mach->in(2) == val, "should be address");
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
      was_store = true;         // Memory op is a store op
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_EncodeISOArray:
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      if( must_clone[iop] ) {
        continue;
      }
      {
        Node* base;
        Node* index;
        const MachOper* oper = mach->memory_inputs(base, index);
        if (oper == NULL || oper == (MachOper*)-1) {
          continue;             // Not a memory op; skip it
        }
        if (val == base ||
            (val == index && val->bottom_type()->isa_narrowoop())) {
          break;                // Found it
        } else {
          continue;             // Skip it
        }
      }
      break;
    }
    if (!was_store && needs_explicit_null_check_for_read(val)) {
      continue;
    }
    Node* ctrl = mach->in(0);
    if (ctrl != NULL && !(ctrl == not_null_block->head() ||
        get_block_for_node(ctrl)->dominates(not_null_block))) {
      continue;
    }
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
                                  Universe::narrow_klass_shift() == 0)) {
          tptr = base->get_ptr_type();
        } else {
          tptr = base->bottom_type()->is_ptr();
        }
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // adjust if the base itself carries an offset
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond page size
      }
    }
    Block *cb = get_block_for_node(mach);
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (block->_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      if (cb->_dom_depth == (block->_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != block ) continue;
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        if( is_decoden ) continue;
      }
      Block *inb = get_block_for_node(mach->in(j));
      Block *b = block;          // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = get_block_for_node(mach);
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      while( b != block ) {
        uint k;
        for( k = 1; k < b->number_of_nodes(); k++ ) {
          Node *n = b->get_node(k);
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->number_of_nodes() )
          break;                // Found anti-dependent load
        if( b->num_preds() != 2 ) break;
        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
      }
      if( b != block ) continue;
    }
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check
    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
      best = mach;
      bidx = vidx;
    }
  }
  if (best == NULL) {
    return;
  }
  extern int implicit_null_checks;
  implicit_null_checks++;
  if( is_decoden ) {
    Block *valb = get_block_for_node(val);
    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
      valb->find_remove(val);
      block->add_inst(val);
      map_node_to_block(val, block);
      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
        Node* n = val->fast_out(j);
        if( n->is_MachProj() ) {
          get_block_for_node(n)->find_remove(n);
          block->add_inst(n);
          map_node_to_block(n, block);
        }
      }
    }
  }
  Block *old_block = get_block_for_node(best);
  old_block->find_remove(best);
  block->add_inst(best);
  map_node_to_block(best, block);
  if (best->in(0) == not_null_block->head()) {
    best->set_req(0, proj->in(0)->in(0));
  }
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->is_MachProj() ) {
      get_block_for_node(n)->find_remove(n);
      block->add_inst(n);
      map_node_to_block(n, block);
    }
  }
  if( proj->Opcode() == Op_IfTrue ) {
    Node *tmp1 = block->get_node(block->end_idx()+1);
    Node *tmp2 = block->get_node(block->end_idx()+2);
    block->map_node(tmp2, block->end_idx()+1);
    block->map_node(tmp1, block->end_idx()+2);
    Node *tmp = new (C) Node(C->top()); // Use not NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  block->map_node(nul_chk, block->end_idx());
  map_node_to_block(nul_chk, block);
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  for (uint i3 = 0; i3 < old_tst->req(); i3++) {
    Node* in = old_tst->in(i3);
    old_tst->set_req(i3, NULL);
    if (in->outcnt() == 0) {
      in->disconnect_inputs(NULL, C);
      block->find_remove(in);
    }
  }
  latency_from_uses(nul_chk);
  latency_from_uses(best);
  if (! best->needs_anti_dependence_check()) {
    for (uint k = 1; k < block->number_of_nodes(); k++) {
      Node *n = block->get_node(k);
      if (n->needs_anti_dependence_check() &&
          n->in(LoadNode::Memory) == best->in(StoreNode::Memory)) {
        insert_anti_dependences(block, n);
      }
    }
  }
}
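//------------------------------select-----------------------------------------
// Select the next node to schedule from the ready worklist.  Projections,
// constants, CreateEx and CheckCastPP win outright; otherwise candidates are
// ranked by a coarse priority (choice), then by latency, then by input count
// (score), with StressLCM optionally randomizing the pick.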
Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }
  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist
  int cand_cnt = 0; // Candidate count
  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    Node *n = worklist[i];      // Get Node on worklist
    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }
    Node *e = block->end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;
    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
        e->in(1)->in(1) == n && n->is_iteratively_computed())
      continue;
    uint n_choice  = 2;
    if ( must_clone[iop] ) {
      bool found_machif = false;
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);
        if (use->is_MachIf() && get_block_for_node(use) == block) {
          found_machif = true;
          break;
        }
        if (ready_cnt.at(use->_idx) > 1)
          n_choice = 1;
      }
      if (found_machif)
        continue;
    }
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }
    if (n->is_MachTemp()) {
      n_choice = 1;
    }
    uint n_latency = get_latency_for_node(n);
    uint n_score   = n->req();   // Many inputs get high score to break ties
    cand_cnt++;
    if (choice < n_choice ||
        (choice == n_choice &&
         ((StressLCM && Compile::randomized_select(cand_cnt)) ||
          (!StressLCM &&
           (latency < n_latency ||
            (latency == n_latency &&
             (score < n_score))))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;               // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist
  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner
  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}
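//------------------------------set_next_call----------------------------------
// Recursively mark 'n' and all of its block-local inputs in next_call, so the
// scheduler knows which values feed the next call in this block.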
void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if (get_block_for_node(m) == block) {
      set_next_call(block, m, next_call);
    }
  }
}
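//------------------------------needed_for_next_call---------------------------
// Find the next block-local call hanging off of 'this_call' and, via
// set_next_call(), mark every node that call depends on.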
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if (get_block_for_node(m) == block && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_MachCall()) {
      call = m;
      break;
    }
  }
  if (call == NULL)  return;    // No next call (e.g., block end is near)
  set_next_call(block, call, next_call);
}
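// Fill in the kill set of a call's fat projection: every register not already
// defined by the call whose save policy is 'C' (save-on-call) or 'A' (always
// save) is killed, as are 'E' (save-on-entry) registers when exclude_soe is
// set.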
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }
}
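//------------------------------sched_call-------------------------------------
// Schedule a call's projections right after the call, update ready counts for
// their users, and append a MachProj "fat projection" that kills every
// register clobbered by the call according to its save policy.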
uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
  RegMask regs;
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    block->map_node(n, node_cnt++);
    regs.OR(n->out_RegMask());
    if( n->bottom_type() == Type::CONTROL )
      needed_for_next_call(block, n, next_call);
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if(get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }
  regs.Insert(_matcher.c_frame_pointer());
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  map_node_to_block(proj, block);
  block->insert_node(proj, node_cnt++);
  const char *save_policy = NULL;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      save_policy = _matcher._c_reg_save_policy;
      break;
    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      save_policy = _matcher._register_save_policy;
      break;
    default:
      ShouldNotReachHere();
  }
  bool exclude_soe = op == Op_CallRuntime;
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }
  add_call_kills(proj, regs, save_policy, exclude_soe);
  return node_cnt;
}
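//------------------------------schedule_local---------------------------------
// Order the nodes within a basic block: Phis and Parms are pinned at the top,
// then ready nodes (all block-local inputs already scheduled) are pulled off
// a worklist by select() until the block is full.  Returns false if the block
// could not be scheduled, in which case compilation is retried without
// subsumed loads.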
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
      for (uint i = 0; i < block->number_of_nodes(); i++) {
        tty->print("# ");
        block->get_node(i)->fast_dump();
      }
      tty->print_cr("#");
    }
#endif
  if (block->number_of_nodes() == 1) {
    return true;
  }
  uint node_cnt = block->end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = block->get_node(i);
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == block->head()) ) {
      block->map_node(block->get_node(phi_cnt), i);
      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
    } else {                    // All others
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && get_block_for_node(m) == block && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up
#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        Node *x = n->in(TypeFunc::Parms);
        if (x != NULL && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) {
          int cnt = ready_cnt.at(n->_idx);
          assert(cnt > 1, err_msg("MemBar node %d must not get ready here", n->_idx));
          ready_cnt.at_put(n->_idx, cnt-1);
        }
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(block->get_node(i2)->_idx, 0);
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = block->get_node(i3);       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if (get_block_for_node(m) == block) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }
  Node_List delay;
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = block->get_node(i4);
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }
  needed_for_next_call(block, block->head(), next_call);
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      for (uint j=0; j< block->number_of_nodes(); j++) {
        Node     *n = block->get_node(j);
        int     idx = n->_idx;
        tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
        tty->print("latency:%3d  ", get_latency_for_node(n));
        tty->print("%4d: %s\n", idx, n->Name());
      }
    }
#endif
  uint max_idx = (uint)ready_cnt.length();
  while( worklist.size() ) {    // Worklist is not ready
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif
    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
    block->map_node(n, phi_cnt++);    // Schedule him next
#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", get_latency_for_node(n));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }
#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }
    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(_matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());
      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      map_node_to_block(proj, block);
      block->insert_node(proj, phi_cnt++);
      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
    }
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if (get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }
  if( phi_cnt != block->end_idx() ) {
    if (C->subsume_loads() && !C->failing()) {
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    } else {
      assert(false, "graph should be schedulable");
    }
    return false;
  }
#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0; i < block->number_of_nodes(); i++) {
      tty->print("# ");
      block->get_node(i)->fast_dump();
    }
    tty->cr();
  }
#endif
  return true;
}
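//------------------------------catch_cleanup_fix_all_inputs-------------------
// Redirect every input (and precedence) edge of 'use' that refers to old_def
// so it refers to new_def instead.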
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}
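//------------------------------catch_cleanup_find_cloned_def------------------
// Find the definition of 'def' that reaches use_blk after the cloning done by
// call_catch_cleanup.  If use_blk sits directly below def_blk, the clone at
// n_clone_idx is the answer; otherwise merge the clones reaching each
// predecessor with a Phi, reusing an existing Phi if it already matches.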
Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;
  Node *fixup = NULL;
  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;
  if( j == def_blk->_num_succs ) {
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      Block* block = get_block_for_node(use_blk->pred(k));
      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
    }
    Node *phi = use_blk->get_node(1);
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          fixup = NULL;
          break;
        }
      }
    }
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->insert_node(new_phi, 1);
      map_node_to_block(new_phi, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }
  } else {
    fixup = use_blk->get_node(n_clone_idx);
  }
  return fixup;
}
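//------------------------------catch_cleanup_intra_block----------------------
// Fix a use inside the cloned region itself: the clone of 'use' in each
// successor block must read the clone of 'def' from that same block.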
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {
  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    Block *sb = blk->_succs[k];
    Node *clone = sb->get_node(offset_idx+1);
    assert( clone->Opcode() == use->Opcode(), "" );
    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
  }
}
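//------------------------------catch_cleanup_inter_block----------------------
// Fix a use outside the cloned region by rewiring it to the cloned (or
// Phi-merged) definition that reaches its block.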
void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge
  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}
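//------------------------------call_catch_cleanup-----------------------------
// If any instructions were scheduled between a Call and its Catch, clone them
// into every successor block below the Catch, patch all uses to refer to the
// clones, and then remove the originals from this block.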
void PhaseCFG::call_catch_cleanup(Block* block) {
  uint end = block->end_idx();
  if( !block->get_node(end)->is_Catch() ) return;
  uint beg = end;
  while(!block->get_node(beg-1)->is_MachProj() ||
        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  if( beg == end ) return;
  for( uint i = 0; i < block->_num_succs; i++ ) {
    Block *sb = block->_succs[i];
    for( uint j = end; j > beg; j-- ) {
      Node *clone = block->get_node(j-1)->clone();
      sb->insert_node(clone, 1);
      map_node_to_block(clone, sb);
      if (clone->needs_anti_dependence_check()) {
        insert_anti_dependences(sb, clone);
      }
    }
  }
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = block->get_node(i2);        // Node that got cloned
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) { // For all users
      Node *use = out->pop();
      Block *buse = get_block_for_node(use);
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Block* b = get_block_for_node(buse->pred(k));
            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (block == buse) {
          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
        }
      }
    } // End for all users
  } // End of for all Nodes in cloned area
  for(uint i3 = beg; i3 < end; i3++ ) {
    block->get_node(beg)->disconnect_inputs(NULL, C);
    block->remove_node(beg);
  }
  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
    Block *sb = block->_succs[i4];
    uint new_cnt = end - beg;
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->get_node(j);
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL, C);
        sb->remove_node(j);
        new_cnt--;
      }
    }
    if (new_cnt > 0) {
      Node *cex = sb->get_node(1+new_cnt);
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->remove_node(1+new_cnt);
        sb->insert_node(cex, 1);
      }
    }
  }
}
C:\hotspot-69087d08d473\src\share\vm/opto/library_call.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
class LibraryIntrinsic : public InlineCallGenerator {
 private:
  bool             _is_virtual;
  bool             _does_virtual_dispatch;
  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t           _last_predicate; // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;
 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int  predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};
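// Local helper class for LibraryIntrinsic: a GraphKit specialized for
// building the IR of one intrinsic call site.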
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted
  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }
  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }
  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }
  bool  try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);
  void push_result() {
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }
 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
  }
  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node*     result() { return _result; }
  virtual int reexecute_sp() { return _reexecute_sp; }
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                Node* *pos_index = NULL);
  Node* generate_nonpositive_guard(Node* index, bool never_negative,
                                   Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
                              bool disjoint_bases, const char* &name, bool dest_uninitialized);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static);
  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
  Node* make_string_method_node(int opcode, Node* str1, Node* str2);
  bool inline_string_compareTo();
  bool inline_string_indexOf();
  Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
  bool inline_string_equals();
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_exp();
  bool inline_pow();
  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_min_max(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_time_funcs(address method, const char* funcName);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();
  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src,  Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
                            Node* slice_len,
                            Node* slice_end);
  bool generate_block_arraycopy(const TypePtr* adr_type,
                                BasicType basic_elem_type,
                                AllocateNode* alloc,
                                Node* src,  Node* src_offset,
                                Node* dest, Node* dest_offset,
                                Node* dest_size, bool dest_uninitialized);
  void generate_slow_arraycopy(const TypePtr* adr_type,
                               Node* src,  Node* src_offset,
                               Node* dest, Node* dest_offset,
                               Node* copy_length, bool dest_uninitialized);
  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
                                     Node* dest_elem_klass,
                                     Node* src,  Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length, bool dest_uninitialized);
  Node* generate_generic_arraycopy(const TypePtr* adr_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length, bool dest_uninitialized);
  void generate_unchecked_arraycopy(const TypePtr* adr_type,
                                    BasicType basic_elem_type,
                                    bool disjoint_bases,
                                    Node* src,  Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length, bool dest_uninitialized);
  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
  bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char *stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node *sha_object);
  Node* get_state_from_sha5_object(Node *sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  bool inline_multiplyToLen();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();
  bool inline_profileBoolean();
};
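//---------------------------make_vm_intrinsic---------------------------------
// Map a method to its intrinsic call generator, or return NULL if the
// intrinsic is disabled, unavailable in this VM configuration, or not
// supported by the matcher on this platform.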
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
  ccstr disable_intr = NULL;
  if ((DisableIntrinsic[0] != '\0'
       && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
      (method_has_option_value("DisableIntrinsic", disable_intr)
       && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
    return NULL;
  }
  if (!m->is_loaded()) {
    return NULL;
  }
  if (is_virtual) {
    switch (id) {
    case vmIntrinsics::_hashCode:
    case vmIntrinsics::_clone:
      break;
    default:
      return NULL;
    }
  }
  if (!InlineNatives) {
    switch (id) {
    case vmIntrinsics::_indexOf:
    case vmIntrinsics::_compareTo:
    case vmIntrinsics::_equals:
    case vmIntrinsics::_equalsC:
    case vmIntrinsics::_getAndAddInt:
    case vmIntrinsics::_getAndAddLong:
    case vmIntrinsics::_getAndSetInt:
    case vmIntrinsics::_getAndSetLong:
    case vmIntrinsics::_getAndSetObject:
    case vmIntrinsics::_loadFence:
    case vmIntrinsics::_storeFence:
    case vmIntrinsics::_fullFence:
      break;  // InlineNatives does not control String.compareTo
    case vmIntrinsics::_Reference_get:
      break;  // InlineNatives does not control Reference.get
    default:
      return NULL;
    }
  }
  int predicates = 0;
  bool does_virtual_dispatch = false;
  switch (id) {
  case vmIntrinsics::_compareTo:
    if (!SpecialStringCompareTo)  return NULL;
    if (!Matcher::match_rule_supported(Op_StrComp))  return NULL;
    break;
  case vmIntrinsics::_indexOf:
    if (!SpecialStringIndexOf)  return NULL;
    break;
  case vmIntrinsics::_equals:
    if (!SpecialStringEquals)  return NULL;
    if (!Matcher::match_rule_supported(Op_StrEquals))  return NULL;
    break;
  case vmIntrinsics::_equalsC:
    if (!SpecialArraysEquals)  return NULL;
    if (!Matcher::match_rule_supported(Op_AryEq))  return NULL;
    break;
  case vmIntrinsics::_arraycopy:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_copyMemory:
    if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_hashCode:
    if (!InlineObjectHash)  return NULL;
    does_virtual_dispatch = true;
    break;
  case vmIntrinsics::_clone:
    does_virtual_dispatch = true;  // fall through to the copy checks below
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy)  return NULL;
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_encodeISOArray:
    if (!SpecialEncodeISOArray)  return NULL;
    if (!Matcher::match_rule_supported(Op_EncodeISOArray))  return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    return NULL;
  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection)  return NULL;
    if (!InlineReflectionGetCallerClass)  return NULL;
    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL)  return NULL;
    break;
  case vmIntrinsics::_bitCount_i:
    if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
    break;
  case vmIntrinsics::_bitCount_l:
    if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
    break;
  case vmIntrinsics::_numberOfLeadingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
    break;
  case vmIntrinsics::_numberOfLeadingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
    break;
  case vmIntrinsics::_numberOfTrailingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
    break;
  case vmIntrinsics::_numberOfTrailingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_c:
    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_s:
    if (!Matcher::match_rule_supported(Op_ReverseBytesS))  return NULL;
    break;
  case vmIntrinsics::_reverseBytes_i:
    if (!Matcher::match_rule_supported(Op_ReverseBytesI))  return NULL;
    break;
  case vmIntrinsics::_reverseBytes_l:
    if (!Matcher::match_rule_supported(Op_ReverseBytesL))  return NULL;
    break;
  case vmIntrinsics::_Reference_get:
    break;
  case vmIntrinsics::_compareAndSwapObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
#endif
    break;
  case vmIntrinsics::_compareAndSwapLong:
    if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
    break;
  case vmIntrinsics::_getAndAddInt:
    if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
    break;
  case vmIntrinsics::_getAndAddLong:
    if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
    break;
  case vmIntrinsics::_getAndSetInt:
    if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
    break;
  case vmIntrinsics::_getAndSetLong:
    if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
    break;
  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
    break;
#else
    if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    break;
#endif
  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:
    if (!UseAESIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyToLen:
    if (!UseMultiplyToLenIntrinsic) return NULL;
    break;
  case vmIntrinsics::_squareToLen:
    if (!UseSquareToLenIntrinsic) return NULL;
    break;
  case vmIntrinsics::_mulAdd:
    if (!UseMulAddIntrinsic) return NULL;
    break;
  case vmIntrinsics::_montgomeryMultiply:
    if (!UseMontgomeryMultiplyIntrinsic) return NULL;
    break;
  case vmIntrinsics::_montgomerySquare:
    if (!UseMontgomerySquareIntrinsic) return NULL;
    break;
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    if (!UseAESIntrinsics) return NULL;
    predicates = 1;
    break;
  case vmIntrinsics::_sha_implCompress:
    if (!UseSHA1Intrinsics) return NULL;
    break;
  case vmIntrinsics::_sha2_implCompress:
    if (!UseSHA256Intrinsics) return NULL;
    break;
  case vmIntrinsics::_sha5_implCompress:
    if (!UseSHA512Intrinsics) return NULL;
    break;
  case vmIntrinsics::_digestBase_implCompressMB:
    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return NULL;
    predicates = 3;
    break;
  case vmIntrinsics::_ghash_processBlocks:
    if (!UseGHASHIntrinsics) return NULL;
    break;
  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    if (!UseCRC32Intrinsics) return NULL;
    break;
  case vmIntrinsics::_incrementExactI:
  case vmIntrinsics::_addExactI:
    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_incrementExactL:
  case vmIntrinsics::_addExactL:
    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactI:
  case vmIntrinsics::_subtractExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactL:
  case vmIntrinsics::_subtractExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactI:
    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactL:
    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
    break;
  default:
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    break;
  }
  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
    if (!InlineClassNatives)  return NULL;
  }
  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
    if (!InlineThreadNatives)  return NULL;
  }
  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
      m->holder()->name() == ciSymbol::java_lang_Float() ||
      m->holder()->name() == ciSymbol::java_lang_Double()) {
    if (!InlineMathNatives)  return NULL;
  }
  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
    if (!InlineUnsafeOps)  return NULL;
  }
  return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id);
}
void Compile::register_library_intrinsics() {
}
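//----------------------------generate-----------------------------------------
// Try to inline the intrinsic body at the call site; on success push the
// result and transfer any exceptions, otherwise record the failure and
// return NULL so the call is compiled normally.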
JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();
  if (kit.try_to_inline(_last_predicate)) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    kit.push_result();
    return kit.transfer_exceptions_into_jvms();
  }
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    } else {
      tty->print("Did not generate intrinsic %s%s at bci:%d in",
               vmIntrinsics::name_at(intrinsic_id()),
               (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}
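//----------------------------generate_predicate-------------------------------
// Emit the runtime check guarding a predicated intrinsic and return the
// slow-path control, which may be NULL if the check folds away.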
Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();
  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be NULL if the check folds.
  }
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      const char* msg = "failed to generate predicate for intrinsic";
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    } else {
      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
                                        vmIntrinsics::name_at(intrinsic_id()),
                                        (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}
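//----------------------------try_to_inline------------------------------------
// Dispatch on the intrinsic id and emit the matching inline IR; returns
// false if this particular call site cannot be intrinsified.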
bool LibraryCallKit::try_to_inline(int predicate) {
  const bool is_store       = true;
  const bool is_native_ptr  = true;
  const bool is_static      = true;
  const bool is_volatile    = true;
  if (!jvms()->has_method()) {
    assert(map()->memory()->Opcode() == Op_Parm, "");
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");
  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:                     return inline_math_native(intrinsic_id());
  case vmIntrinsics::_min:
  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());
  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);
  case vmIntrinsics::_arraycopy:                return inline_arraycopy();
  case vmIntrinsics::_compareTo:                return inline_string_compareTo();
  case vmIntrinsics::_indexOf:                  return inline_string_indexOf();
  case vmIntrinsics::_equals:                   return inline_string_equals();
  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);
  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);
  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);
  case vmIntrinsics::_prefetchRead:             return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  case vmIntrinsics::_prefetchWrite:            return inline_unsafe_prefetch(!is_native_ptr,  is_store, !is_static);
  case vmIntrinsics::_prefetchReadStatic:       return inline_unsafe_prefetch(!is_native_ptr, !is_store,  is_static);
  case vmIntrinsics::_prefetchWriteStatic:      return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);
  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
  case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
  case vmIntrinsics::_getClassId:               return inline_native_classID();
  case vmIntrinsics::_getEventWriter:           return inline_native_getEventWriter();
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:                 return inline_native_newArray();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC:                  return inline_array_equals();
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getComponentType:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
  case vmIntrinsics::_Reference_get:            return inline_reference_get();
  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());
  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
    return inline_sha_implCompress(intrinsic_id());
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);
  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();
  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();
  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();
  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();
  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_encodeISOArray:
    return inline_encodeISOArray();
  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();
  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  default:
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}
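// Some intrinsics in the dispatch table above (the cipherBlockChaining and
// digestBase entries) are only valid when a runtime predicate holds, e.g.
// that the embedded cipher really is an AESCrypt instance. try_to_predicate()
// emits that check and returns the control path on which the plain,
// non-intrinsic call must still be generated.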
Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a NULL method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node.
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");
  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);
  default:
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}
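// Merge a multi-path result: the region becomes the new control and the phi
// the intrinsic's value. The assert checks that the phi's basic type agrees
// with the declared return type of the intrinsified method.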
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result( _gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}
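// Helper for building guarded fast/slow graph shapes. 'test' is a Bool that
// is true on the slow path. On return, control() follows the fast path; the
// returned node is the slow-path control (also appended to 'region' if one
// is given), or NULL when the slow path is provably never taken.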
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    return NULL;
  }
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }
  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
  Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff));
  if (if_slow == top()) {
    return NULL;                // slow branch folded away; nothing to guard
  }
  if (region != NULL)
    region->add_req(if_slow);
  Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff));
  set_control(if_fast);
  return if_slow;
}
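// Thin wrappers that pin the branch probability. A typical use (sketch,
// mirroring generate_negative_guard() below) looks like:
//   Node* cmp  = _gvn.transform(new (C) CmpINode(index, intcon(0)));
//   Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::lt));
//   Node* slow = generate_slow_guard(bol, slow_region);
//   // control() now follows the fast path; 'slow' (if non-NULL) feeds the
//   // slow region.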
inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}
inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    Node* ccast = new (C) CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}
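// Guard against index <= 0, or index == 0 when the caller promises the value
// is never negative (which allows the cheaper eq test). On the fast path the
// index can be narrowed to TypeInt::POS1, i.e. [1,maxint].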
inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
                                                        Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq));
  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
  if (is_notp != NULL && pos_index != NULL) {
    Node* ccast = new (C) CastIINode(index, TypeInt::POS1);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_notp;
}
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new (C) AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}
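// Load the current java.lang.Thread oop from the JavaThread's threadObj
// field, addressed off the thread-local storage pointer. tls_output returns
// the raw ThreadLocal node so a caller can reuse it for other per-thread
// fields.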
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new (C) ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
  tls_output = thread;
  return threadObj;
}
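// Build a String intrinsic node (StrComp, StrEquals or StrIndexOf) from two
// String oops: unpack each String's backing char[] value, start offset and
// length, and hand the raw first-character addresses to the intrinsic node.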
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
  Node* no_ctrl = NULL;
  Node* str1_value  = load_String_value(no_ctrl, str1);
  Node* str1_offset = load_String_offset(no_ctrl, str1);
  Node* str1_start  = array_element_address(str1_value, str1_offset, T_CHAR);
  Node* str1_len    = load_String_length(no_ctrl, str1);
  Node* str2_value  = load_String_value(no_ctrl, str2);
  Node* str2_offset = load_String_offset(no_ctrl, str2);
  Node* str2_start  = array_element_address(str2_value, str2_offset, T_CHAR);
  Node* str2_len    = NULL;
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    str2_len = load_String_length(no_ctrl, str2);
    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                    str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrComp:
    str2_len = load_String_length(no_ctrl, str2);
    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrEquals:
    // StrEquals takes only one count: the caller is expected to have
    // established that the two lengths are equal.
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                                   str1_start, str2_start, str1_len);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  return _gvn.transform(result);
}
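// Same as above, but for callers that have already materialized the raw
// char[] start addresses and counts (e.g. inline_string_equals() below,
// which checks the lengths itself before comparing characters).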
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                    str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrComp:
    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrEquals:
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                                   str1_start, str2_start, cnt1);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  return _gvn.transform(result);
}
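// Intrinsic for String.compareTo(String). Both the receiver and the argument
// are null-checked; if either check deoptimizes, stopped() is true and the
// intrinsic is still considered handled. Roughly (illustrative Java shape):
//   this.compareTo(other)  =>  StrComp(this, other)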
bool LibraryCallKit::inline_string_compareTo() {
  Node* receiver = null_check(argument(0));
  Node* arg      = null_check(argument(1));
  if (stopped()) {
    return true;
  }
  set_result(make_string_method_node(Op_StrComp, receiver, arg));
  return true;
}
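// Intrinsic for String.equals(Object). The graph built below mirrors this
// sketch (illustrative Java shape, not the library source):
//   if (this == arg)                  return true;   // path 2
//   if (!(arg instanceof String))     return false;  // path 3
//   if (this.length() != s.length())  return false;  // path 4
//   return <StrEquals over the backing char arrays>; // path 1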
bool LibraryCallKit::inline_string_equals() {
  Node* receiver = null_check_receiver();
  Node* argument = this->argument(1); // 'this->' needed: the local shadows the accessor
  if (stopped()) {
    return true;
  }
  // Four result paths merge here: (1) character compare, (2) reference
  // identity, (3) not a String, (4) different lengths.
  RegionNode* region = new (C) RegionNode(5);
  Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
  // Path 2: receiver == argument, trivially equal.
  Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
  Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
  Node* if_eq = generate_slow_guard(bol, NULL);
  if (if_eq != NULL) {
    phi->init_req(2, intcon(1));
    region->init_req(2, if_eq);
  }
  // Get the String klass for the instanceof test below.
  ciInstanceKlass* klass = env()->String_klass();
  if (!stopped()) {
    // Path 3: argument is not a String, trivially unequal.
    Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
    Node* cmp  = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
    Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
    Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
    if (inst_false != NULL) {
      phi->init_req(3, intcon(0));
      region->init_req(3, inst_false);
    }
  }
  if (!stopped()) {
    // The argument is now known to be a non-null String; sharpen its type.
    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
    argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
    argument = cast_not_null(argument, false);
    Node* no_ctrl = NULL;
    Node* receiver_val    = load_String_value(no_ctrl, receiver);
    Node* receiver_offset = load_String_offset(no_ctrl, receiver);
    Node* receiver_start  = array_element_address(receiver_val, receiver_offset, T_CHAR);
    Node* receiver_cnt    = load_String_length(no_ctrl, receiver);
    Node* argument_val    = load_String_value(no_ctrl, argument);
    Node* argument_offset = load_String_offset(no_ctrl, argument);
    Node* argument_start  = array_element_address(argument_val, argument_offset, T_CHAR);
    Node* argument_cnt    = load_String_length(no_ctrl, argument);
    // Path 4: lengths differ, trivially unequal.
    Node* cmp = _gvn.transform(new (C) CmpINode(receiver_cnt, argument_cnt));
    Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
    Node* if_ne = generate_slow_guard(bol, NULL);
    if (if_ne != NULL) {
      phi->init_req(4, intcon(0));
      region->init_req(4, if_ne);
    }
    // Path 1: same length; compare the character arrays.
    if (!stopped()) {
      Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
      phi->init_req(1, equals);
      region->init_req(1, control());
    }
  }
  }
  set_control(_gvn.transform(region));
  record_for_igvn(region);
  set_result(_gvn.transform(phi));
  return true;
}
bool LibraryCallKit::inline_array_equals() {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);
  set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
  return true;
}
