#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map      = this->map();
  JVMState*      jvms     = this->jvms();
  int            stk_size = jvms->stk_size();
  int            stkoff   = jvms->stkoff();
  Node*          top      = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());       // Record the new bci in the JVMState
  jvms->set_sp(sp());         // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state.  This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() !=      jvms()->bci())          return false;
    if (sp()  != (int)jvms()->sp())           return false;
    return true;
  }
  if (jvms()->method() != parse->method())    return false;
  if (jvms()->bci()    != parse->bci())       return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp          != parse->sp())        return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth       != parse->depth())     return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL)  return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL)  return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == NULL)  return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(NULL, C); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
bool GraphKit::stopped() {
  if (map() == NULL)           return true;
  else if (control() == top()) return true;
  else                         return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//----------------------------set_saved_ex_oop---------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it)  ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Test whether an exception oop has been saved in this map.
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}


//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == NULL || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node*       ex_oop  = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type.  Chain it on the list.
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new (C) SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff())  map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL;   // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having the root node as its control input, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node*         hidden_merge_mark = root();
  Node*         region  = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new (C) RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int       bci    = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
    // if the exception capability is set, then we will generate code
    // to check the JavaThread.should_post_on_exceptions flag to see
    // if we actually need to report exception events (for this
    // thread).  If we don't need to report exception events, we will
    // take the normal fast path provided by add_exception_events.  If
    // exception event reporting is enabled for this thread, we will
    // take the uncommon_trap in the BuildCutout below.

    // first must access the should_post_on_exceptions_flag in this thread's JavaThread
    Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
    Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
    Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

    // Test the should_post_on_exceptions_flag vs. 0
    Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
    Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );

    // Branch to slow_path if should_post_on_exceptions_flag was true
    { BuildCutout unless(this, tst, PROB_MAX);
      // Do not try anything fancy if we're notifying the VM on every throw.
      // Cf. case Bytecodes::_athrow in parse2.cpp.
      uncommon_trap(reason, Deoptimization::Action_none,
                    (ciKlass*)NULL, (char*)NULL, must_throw);
    }

}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  if (env()->jvmti_can_post_on_exceptions()) {
    // check if we must post exception events, take uncommon trap if so
    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
    // here if should_post_on_exceptions is false
    // continue on with the normal codegen
  }

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
        treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note: If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot
      && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      if (java_bc() == Bytecodes::_aastore) {
        ex_obj = env()->ArrayStoreException_instance();
      } else {
        ex_obj = env()->ClassCastException_instance();
      }
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
      Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      // Conservatively release stores of object references.
      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit    = kit;
  _map    = kit->map();   // preserve the map
  _sp     = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci    = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block  = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block,    "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new (kit->C) IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new (kit->C) IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit    =    kit;
  _sp     =    kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped()) return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL)  return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(C, map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0)  continue;  // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int       bci    = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if
// deoptimization happens
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci   = jvms->bci();
  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && code == Bytecodes::_multianewarray);
    // Reexecute the _multianewarray bytecode which was replaced with a
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: the interpreter should not have this set, since the
    // optimization is limited by dimensions and guarded by a flag, so
    // in some cases multianewarray() runtime calls will be generated
    // and the bytecode should not be reexecuted (the stack will not be
    // reset).
  } else
    return false;
}

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different.  This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms); // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() && //don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
    out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.
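  // For example, with a single inlined callee the edges run:
  //   ctl io mem fptr retadr, parms, then the caller's debug info
  //   (locals, stack, monitors, scalarized objects), then the callee's.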

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  BasicType rtype = T_ILLEGAL;
  int       rsize = 0;

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
    rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
    if (rtype < T_CONFLICT)
      rsize = type2size[rtype];
  }
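
  // Example: for Bytecodes::_iadd, depth is -1 (two ints popped, one
  // pushed), rtype is T_INT, and rsize is 1; the default case below
  // then yields inputs = rsize - depth = 2.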

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool ignored_will_link;
      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
      int      size  = field->type()->size();
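      // Bytecodes::depth() for these assumes a one-slot field:
      // getstatic = +1, getfield = 0, putstatic = -1, putfield = -2.
      // So the sign distinguishes get from put, and the low bit
      // distinguishes static from instance (note that -1 & 1 == 1).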
      bool is_get = (depth >= 0), is_static = (depth & 1);
      inputs = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;        // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokeinterface:
    {
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      assert(declared_signature != NULL, "cannot be null");
      inputs   = declared_signature->arg_size_for_bc(code);
      int size = declared_signature->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize == 1, "");
      depth = rsize - inputs;
    }
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize == -depth, "");
    inputs = rsize;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;                  // S.B. depth=1, not zero: jsr pushes a returnAddress
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  }
#endif //ASSERT

  return true;
}



//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0))  return ptr;
  return _gvn.transform( new (C) AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((jlong) offset_con);
  }
  return _gvn.transform( new (C) ConvI2LNode(offset));
}

Node* GraphKit::ConvI2UL(Node* offset) {
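  // ConvI2L sign-extends, so mask the result down to the low 32 bits
  // to produce the zero-extended (unsigned) value.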
  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
  if (offset_con != (juint) Type::OffsetBot) {
    return longcon((julong) offset_con);
  }
  Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
  Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
  return _gvn.transform( new (C) AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new (C) ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != NULL)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new (C) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
    }
  }
  return alen;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
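// Callers typically reach this through the null_check/null_assert/
// null_check_receiver wrappers declared in graphKit.hpp.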
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped())  return top();
  if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
    // For some performance testing, we may wish to suppress null checking.
    value = cast_not_null(value);   // Make it appear to be non-null (4962416).
    return value;
  }
  explicit_null_checks_inserted++;

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
    case T_LONG   : chk = new (C) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new (C) CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == NULL) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      tp->klass(), "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          explicit_null_checks_elided++;
          return value;           // Elided null assert quickly!
        }
      } else {
        // See if mixing in the NULL pointer changes type.
        // If so, then the NULL pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          explicit_null_checks_elided++;
          return value;           // Elided null check quickly!
        }
      }
      chk = new (C) CmpPNode( value, null() );
      break;
    }

    default:
      fatal(err_msg_res("unexpected type: %s", type2name(type)));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new (C) BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {       // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct a CastPP
        // identical to (and hence hashing to) the one that already exists
        // for the prior test.  Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        explicit_null_checks_elided++;
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null)
    reason = Deoptimization::Reason_null_assert;
  else if (type == T_OBJECT)
    reason = Deoptimization::Reason_null_check;
  else
    reason = Deoptimization::Reason_div0_check;

  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != NULL || too_many_traps(reason)) {
    // probability is less likely
    ok_prob =  PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != NULL &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob =  PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new (C) IfFalseNode(iff));
    set_control(      _gvn.transform( new (C) IfTrueNode(iff)));
    if (null_true == top())
      explicit_null_checks_elided++;
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      explicit_null_checks_elided++;
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    NULL, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();               // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == NULL || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}


//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null ) return obj;

  Node *cast = new (C) CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;                  // Return casted value
}


//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
  if (old == neww) {
    return;
  }

  map()->replace_edge(old, neww);

  // Note: This operation potentially replaces any edge
  // on the map.  This includes locals, stack, and monitors
  // of the current (innermost) JVM state.

  // don't let inconsistent types from profiling escape this
  // method

  const Type* told = _gvn.type(old);
  const Type* tnew = _gvn.type(neww);

  if (!tnew->higher_equal(told)) {
    return;
  }

  map()->record_replaced_node(old, neww);
}


//=============================================================================
//--------------------------------memory---------------------------------------
Node* GraphKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

//-----------------------------reset_memory------------------------------------
Node* GraphKit::reset_memory() {
  Node* mem = map()->memory();
  // do not use this node for any more parsing!
  debug_only( map()->set_memory((Node*)NULL) );
  return _gvn.transform( mem );
}

//------------------------------set_all_memory---------------------------------
void GraphKit::set_all_memory(Node* newmem) {
  Node* mergemem = MergeMemNode::make(C, newmem);
  gvn().set_type_bottom(mergemem);
  map()->set_memory(mergemem);
}

//------------------------------set_all_memory_call----------------------------
void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
  Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
  set_all_memory(newmem);
}

//=============================================================================
//
// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//
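// A typical raw-slice use appears in
// uncommon_trap_if_should_post_on_exceptions() above:
//   make_load(control(), adr, TypeInt::INT, T_INT,
//             Compile::AliasIdxRaw, MemNode::unordered);
//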

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          int adr_idx,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
  }
  if (unaligned) {
    ld->as_Load()->set_unaligned_access();
  }
  if (mismatched) {
    ld->as_Load()->set_mismatched_access();
  }
  ld = _gvn.transform(ld);
  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
  }
  return ld;
}

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                int adr_idx,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st;
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
  }
  if (unaligned) {
    st->as_Store()->set_unaligned_access();
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}


void GraphKit::pre_barrier(bool do_load,
                           Node* ctl,
                           Node* obj,
                           Node* adr,
                           uint  adr_idx,
                           Node* val,
                           const TypeOopPtr* val_type,
                           Node* pre_val,
                           BasicType bt) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
      break;

    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      break;

    case BarrierSet::Other:
    default      :
      ShouldNotReachHere();

  }
}

bool GraphKit::can_move_pre_barrier() const {
  BarrierSet* bs = Universe::heap()->barrier_set();
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      return true; // Can move it if no safepoint

    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
    case BarrierSet::ModRef:
      return true; // There is no pre-barrier

    case BarrierSet::Other:
    default      :
      ShouldNotReachHere();
  }
  return false;
}

void GraphKit::post_barrier(Node* ctl,
                            Node* store,
                            Node* obj,
                            Node* adr,
                            uint  adr_idx,
                            Node* val,
                            BasicType bt,
                            bool use_precise) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  set_control(ctl);
  switch (bs->kind()) {
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
      break;

    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
      break;

    case BarrierSet::ModRef:
      break;

    case BarrierSet::Other:
    default      :
      ShouldNotReachHere();

  }
}
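
// Barrier placement, illustratively: for a reference store obj.f = val,
// store_oop() below emits
//
//   pre_barrier(...)       // G1 SATB: log the old value before the store
//   store_to_memory(...)   // the oop store itself
//   post_barrier(...)      // G1 remembered sets, or a plain card mark
//
// Under CardTableModRef the pre-barrier is a no-op and only the card
// mark remains; under ModRef both barriers are no-ops.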

Node* GraphKit::store_oop(Node* ctl,
                          Node* obj,
                          Node* adr,
                          const TypePtr* adr_type,
                          Node* val,
                          const TypeOopPtr* val_type,
                          BasicType bt,
                          bool use_precise,
                          MemNode::MemOrd mo,
                          bool mismatched) {
  // Transformation of a value which could be NULL pointer (CastPP #NULL)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute transformation here to avoid barrier generation in such case.
  if (_gvn.type(val) == TypePtr::NULL_PTR)
    val = _gvn.makecon(TypePtr::NULL_PTR);

  set_control(ctl);
  if (stopped()) return top(); // Dead path ?

  assert(bt == T_OBJECT, "sanity");
  assert(val != NULL, "not dead path");
  uint adr_idx = C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );

  pre_barrier(true /* do_load */,
              control(), obj, adr, adr_idx, val, val_type,
              NULL /* pre_val */,
              bt);

  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
  post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
  return store;
}

// Could be an array or object we don't know at compile time (unsafe ref.)
Node* GraphKit::store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,  // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo,
                             bool mismatched) {
  Compile::AliasType* at = C->alias_type(adr_type);
  const TypeOopPtr* val_type = NULL;
  if (adr_type->isa_instptr()) {
    if (at->field() != NULL) {
      // known field.  This code is a copy of the do_put_xxx logic.
      ciField* field = at->field();
      if (!field->type()->is_loaded()) {
        val_type = TypeInstPtr::BOTTOM;
      } else {
        val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
    }
  } else if (adr_type->isa_aryptr()) {
    val_type = adr_type->is_aryptr()->elem()->make_oopptr();
  }
  if (val_type == NULL) {
    val_type = TypeInstPtr::BOTTOM;
  }
  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
}


//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base  = basic_plus_adr(ary, header);
#ifdef _LP64
  // The scaled index operand to AddP must be a clean 64-bit value.
  // Java allows a 32-bit int to be incremented to a negative
  // value, which appears in a 64-bit register as a large
  // positive number.  Using that large positive number as an
  // operand in pointer arithmetic has bad consequences.
  // On the other hand, 32-bit overflow is rare, and the possibility
  // can often be excluded, if we annotate the ConvI2L node with
  // a type assertion that its value is known to be a small positive
  // number.  (The prior range check has ensured this.)
  // This assertion is used by ConvI2LNode::Ideal.
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  if (sizetype != NULL) index_max = sizetype->_hi - 1;
  const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
  idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
#endif
  Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}
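
// Worked example: for a T_INT array, type2aelembytes(T_INT) == 4, so
// shift == 2 and header == arrayOopDesc::base_offset_in_bytes(T_INT).
// A constant index such as 5 short-circuits to the flat address
//   ary + header + (5 << 2)
// while a variable index produces an AddP chain computing
//   ary + header + (ConvI2L(idx) << 2)
// with the ConvI2L range-annotated as described above on 64-bit platforms.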

//-------------------------load_array_element-------------------------
Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
  const Type* elemtype = arytype->elem();
  BasicType elembt = elemtype->array_element_basic_type();
  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
  return ld;
}

//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
  // Add the call arguments:
  uint nargs = call->method()->arg_size();
  for (uint i = 0; i < nargs; i++) {
    Node* arg = argument(i);
    call->init_req(i + TypeFunc::Parms, arg);
  }
}

//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_edges_for_java_call.
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {

  // Add the predefined inputs:
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O    , i_o() );
  call->init_req( TypeFunc::Memory , reset_memory() );
  call->init_req( TypeFunc::FramePtr, frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top() );

  add_safepoint_edges(call, must_throw);

  Node* xcall = _gvn.transform(call);

  if (xcall == top()) {
    set_control(top());
    return;
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

  set_control(_gvn.transform(new (C) ProjNode(call, TypeFunc::Control)));
  set_i_o(    _gvn.transform(new (C) ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
}

Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
  if (stopped())  return top();  // maybe the call folded up?

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == NULL ||
      call->method()->return_type()->basic_type() == T_VOID)
        ret = top();
  else  ret = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));

  // Note:  Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new (C) ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
}

//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores.  Across a GC
// point the Store Barrier and the card mark memory has to agree.  I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below.  Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
  // Set fixed predefined input arguments
  Node* memory = reset_memory();
  Node* m = narrow_mem == NULL ? memory : narrow_mem;
  call->init_req( TypeFunc::Control,   control()  );
  call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
  call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs
  call->init_req( TypeFunc::FramePtr,  frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top()      );
  return memory;
}

//-------------------set_predefined_output_for_runtime_call--------------------
// Set control and memory (not i_o) from the call.
// If keep_mem is not NULL, use it for the output state,
// except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
// If hook_mem is NULL, this call produces no memory effects at all.
// If hook_mem is a Java-visible memory slice (such as arraycopy operands),
// then only that memory slice is taken from the call.
// In the last case, we must put an appropriate memory barrier before
// the call, so as to create the correct anti-dependencies on loads
// preceding the call.
void GraphKit::set_predefined_output_for_runtime_call(Node* call,
                                                      Node* keep_mem,
                                                      const TypePtr* hook_mem) {
  // no i/o
  set_control(_gvn.transform( new (C) ProjNode(call,TypeFunc::Control) ));
  if (keep_mem) {
    // First clone the existing memory state
    set_all_memory(keep_mem);
    if (hook_mem != NULL) {
      // Make memory for the call
      Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );
      // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
      // We also use hook_mem to extract specific effects from arraycopy stubs.
      set_memory(mem, hook_mem);
    }
    // ...else the call has NO memory effects.

    // Make sure the call advertises its memory effects precisely.
    // This lets us build accurate anti-dependences in gcm.cpp.
    assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
           "call node must be constructed correctly");
  } else {
    assert(hook_mem == NULL, "");
    // This is not a "slow path" call; all memory comes from the call.
    set_all_memory_call(call);
  }
}


// Replace the call with the current state of the kit.
void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
  JVMState* ejvms = NULL;
  if (has_exceptions()) {
    ejvms = transfer_exceptions_into_jvms();
  }

  ReplacedNodes replaced_nodes = map()->replaced_nodes();
  ReplacedNodes replaced_nodes_exception;
  Node* ex_ctl = top();

  SafePointNode* final_state = stop();

  // Find all the needed outputs of this call
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);

  Node* init_mem = call->in(TypeFunc::Memory);
  Node* final_mem = final_state->in(TypeFunc::Memory);
  Node* final_ctl = final_state->in(TypeFunc::Control);
  Node* final_io = final_state->in(TypeFunc::I_O);

  // Replace all the old call edges with the edges from the inlining result
  if (callprojs.fallthrough_catchproj != NULL) {
    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  }
  if (callprojs.fallthrough_memproj != NULL) {
    if (final_mem->is_MergeMem()) {
      // The parser's exit MergeMem was not transformed but may be optimized
      final_mem = _gvn.transform(final_mem);
    }
    C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
  }
  if (callprojs.fallthrough_ioproj != NULL) {
    C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
  }

  // Replace the result with the new result if it exists and is used
  if (callprojs.resproj != NULL && result != NULL) {
    C->gvn_replace_by(callprojs.resproj, result);
  }

  if (ejvms == NULL) {
    // No exception edges, so simply kill off those paths
    if (callprojs.catchall_catchproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
    }
    if (callprojs.catchall_memproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
    }
    if (callprojs.catchall_ioproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
    }
    // Replace the old exception object with top
    if (callprojs.exobj != NULL) {
      C->gvn_replace_by(callprojs.exobj, C->top());
    }
  } else {
    GraphKit ekit(ejvms);

    // Load my combined exception state into the kit, with all phis transformed:
    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
    replaced_nodes_exception = ex_map->replaced_nodes();

    Node* ex_oop = ekit.use_exception_state(ex_map);

    if (callprojs.catchall_catchproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
      ex_ctl = ekit.control();
    }
    if (callprojs.catchall_memproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
    }
    if (callprojs.catchall_ioproj != NULL) {
      C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
    }

    // Replace the old exception object with the newly created one
    if (callprojs.exobj != NULL) {
      C->gvn_replace_by(callprojs.exobj, ex_oop);
    }
  }

  // Disconnect the call from the graph
  call->disconnect_inputs(NULL, C);
  C->gvn_replace_by(call, C->top());

  // Clean up any MergeMems that feed other MergeMems since the
  // optimizer doesn't like that.
  if (final_mem->is_MergeMem()) {
    Node_List wl;
    for (SimpleDUIterator i(final_mem); i.has_next(); i.next()) {
      Node* m = i.get();
      if (m->is_MergeMem() && !wl.contains(m)) {
        wl.push(m);
      }
    }
    while (wl.size()  > 0) {
      _gvn.transform(wl.pop());
    }
  }

  if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes.apply(C, final_ctl);
  }
  if (!ex_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes_exception.apply(C, ex_ctl);
  }
}


//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1

void GraphKit::increment_counter(address counter_addr) {
  Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
  increment_counter(adr1);
}

void GraphKit::increment_counter(Node* counter_addr) {
  int adr_idx = Compile::AliasIdxRaw;
  Node* ctrl = control();
  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_idx, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, counter_addr, incr, T_INT, adr_idx, MemNode::unordered);
}


//------------------------------uncommon_trap----------------------------------
// Bail out to the interpreter in mid-method.  Implemented by calling the
// uncommon_trap blob.  This helper function inserts a runtime call with the
// right debug info.
void GraphKit::uncommon_trap(int trap_request,
                             ciKlass* klass, const char* comment,
                             bool must_throw,
                             bool keep_exact_action) {
  if (failing())  stop();
  if (stopped())  return; // trap reachable?

  // Note:  If ProfileTraps is true, and if a deopt. actually
  // occurs here, the runtime will make sure an MDO exists.  There is
  // no need to call method()->ensure_method_data() at this point.

  // Set the stack pointer to the right value for reexecution:
  set_sp(reexecute_sp());

#ifdef ASSERT
  if (!must_throw) {
    // Make sure the stack has at least enough depth to execute
    // the current bytecode.
    int inputs, ignored_depth;
    if (compute_stack_effects(inputs, ignored_depth)) {
      assert(sp() >= inputs, err_msg_res("must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
             Bytecodes::name(java_bc()), sp(), inputs));
    }
  }
#endif

  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);

  switch (action) {
  case Deoptimization::Action_maybe_recompile:
  case Deoptimization::Action_reinterpret:
    // Temporary fix for 6529811 to allow virtual calls to be sure they
    // get the chance to go from mono->bi->mega
    if (!keep_exact_action &&
        Deoptimization::trap_request_index(trap_request) < 0 &&
        too_many_recompiles(reason)) {
      // This BCI is causing too many recompilations.
      if (C->log() != NULL) {
        C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
                Deoptimization::trap_reason_name(reason),
                Deoptimization::trap_action_name(action));
      }
      action = Deoptimization::Action_none;
      trap_request = Deoptimization::make_trap_request(reason, action);
    } else {
      C->set_trap_can_recompile(true);
    }
    break;
  case Deoptimization::Action_make_not_entrant:
    C->set_trap_can_recompile(true);
    break;
#ifdef ASSERT
  case Deoptimization::Action_none:
  case Deoptimization::Action_make_not_compilable:
    break;
  default:
    fatal(err_msg_res("unknown action %d: %s", action, Deoptimization::trap_action_name(action)));
    break;
#endif
  }

  if (TraceOptoParse) {
    char buf[100];
    tty->print_cr("Uncommon trap %s at bci:%d",
                  Deoptimization::format_trap_request(buf, sizeof(buf),
                                                      trap_request), bci());
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    int kid = (klass == NULL)? -1: log->identify(klass);
    log->begin_elem("uncommon_trap bci='%d'", bci());
    char buf[100];
    log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
                                                          trap_request));
    if (kid >= 0)         log->print(" klass='%d'", kid);
    if (comment != NULL)  log->print(" comment='%s'", comment);
    log->end_elem();
  }

  // Make sure any guarding test views this path as very unlikely
  Node *i0 = control()->in(0);
  if (i0 != NULL && i0->is_If()) {        // Found a guarding if test?
    IfNode *iff = i0->as_If();
    float f = iff->_prob;   // Get prob
    if (control()->Opcode() == Op_IfTrue) {
      if (f > PROB_UNLIKELY_MAG(4))
        iff->_prob = PROB_MIN;
    } else {
      if (f < PROB_LIKELY_MAG(4))
        iff->_prob = PROB_MAX;
    }
  }

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Now insert the uncommon trap subroutine call
  address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
  const TypePtr* no_memory_effects = NULL;
  // Pass the index of the class to be loaded
  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
                                 (must_throw ? RC_MUST_THROW : 0),
                                 OptoRuntime::uncommon_trap_Type(),
                                 call_addr, "uncommon_trap", no_memory_effects,
                                 intcon(trap_request));
  assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
         "must extract request correctly from the graph");
  assert(trap_request != 0, "zero value reserved by uncommon_trap_request");

  call->set_req(TypeFunc::ReturnAdr, returnadr());
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here.  The above call should never return!
  HaltNode* halt = new(C) HaltNode(control(), frameptr());
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  stop_and_kill_map();
}
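
// For illustration: most callers reach the routine above through the
// (reason, action) convenience overloads, which pack the pair with
// Deoptimization::make_trap_request, e.g.
//
//   uncommon_trap(Deoptimization::Reason_null_check,
//                 Deoptimization::Action_make_not_entrant);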


//--------------------------just_allocated_object------------------------------
// Report the object that was just allocated.
// It must be the case that there are no intervening safepoints.
// We use this to determine if an object is so "fresh" that
// it does not require card marks.
Node* GraphKit::just_allocated_object(Node* current_control) {
  if (C->recent_alloc_ctl() == current_control)
    return C->recent_alloc_obj();
  return NULL;
}


void GraphKit::round_double_arguments(ciMethod* dest_method) {
  // (Note:  TypeFunc::make has a cache that makes this fast.)
  const TypeFunc* tf    = TypeFunc::make(dest_method);
  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
  for (int j = 0; j < nargs; j++) {
    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
    if( targ->basic_type() == T_DOUBLE ) {
      // If any parameters are doubles, they must be rounded before
      // the call; dstore_rounding does gvn.transform.
      Node *arg = argument(j);
      arg = dstore_rounding(arg);
      set_argument(j, arg);
    }
  }
}

/**
 * Record profiling data exact_kls for Node n with the type system so
 * that it can propagate it (speculation)
 *
 * @param n          node that the type applies to
 * @param exact_kls  type from profiling
 *
 * @return           node with improved type
 */
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
  const Type* current_type = _gvn.type(n);
  assert(UseTypeSpeculation, "type speculation must be on");

  const TypeOopPtr* speculative = current_type->speculative();

  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
    const TypeOopPtr* xtype = tklass->as_instance_type();
    assert(xtype->klass_is_exact(), "Should be exact");
    // record the new speculative type's depth
    speculative = xtype->with_inline_depth(jvms()->depth());
  }

  if (speculative != current_type->speculative()) {
    // Build a type with a speculative type (what we think we know
    // about the type but will need a guard when we use it)
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
    Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }

  return n;
}
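
// For example, if profiling at this bci has only ever seen
// java.lang.String receivers, exact_kls is String and n is replaced by
//   CheckCastPP(control(), n, <current type joined with String:exact, speculative>)
// Later type checks against String can then fold, at the price of a
// Reason_speculate_class_check guard wherever the speculation is
// actually relied upon.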

/**
 * Record profiling data from receiver profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param n  receiver node
 *
 * @return   node with improved type
 */
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
  if (!UseTypeSpeculation) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  return record_profile_for_speculation(n, exact_kls);
}

/**
 * Record profiling data from argument profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param dest_method  target method for the call
 * @param bc           what invoke bytecode is this?
 */
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
  if (!UseTypeSpeculation) {
    return;
  }
  const TypeFunc* tf    = TypeFunc::make(dest_method);
  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
    if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
      ciKlass* better_type = method()->argument_profiled_type(bci(), i);
      if (better_type != NULL) {
        record_profile_for_speculation(argument(j), better_type);
      }
      i++;
    }
  }
}

/**
 * Record profiling data from parameter profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_parameters_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
    if (_gvn.type(local(i))->isa_oopptr()) {
      ciKlass* better_type = method()->parameter_profiled_type(j);
      if (better_type != NULL) {
        record_profile_for_speculation(local(i), better_type);
      }
      j++;
    }
  }
}

void GraphKit::round_double_result(ciMethod* dest_method) {
  // A non-strict method may return a double value which has an extended
  // exponent, but this must not be visible in a caller which is 'strict'.
  // If a strict caller invokes a non-strict callee, round the double result.

  BasicType result_type = dest_method->return_type()->basic_type();
  assert( method() != NULL, "must have caller context");
  if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
    // Destination method's return value is on top of stack
    // dstore_rounding() does gvn.transform
    Node *result = pop_pair();
    result = dstore_rounding(result);
    push_pair(result);
  }
}

// rounding for strict float precision conformance
Node* GraphKit::precision_rounding(Node* n) {
  return UseStrictFP && _method->flags().is_strict()
    && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
    ? _gvn.transform( new (C) RoundFloatNode(0, n) )
    : n;
}

// rounding for strict double precision conformance
Node* GraphKit::dprecision_rounding(Node *n) {
  return UseStrictFP && _method->flags().is_strict()
    && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
    ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
    : n;
}

// rounding for non-strict double stores
Node* GraphKit::dstore_rounding(Node* n) {
  return Matcher::strict_fp_requires_explicit_rounding
    && UseSSE <= 1
    ? _gvn.transform( new (C) RoundDoubleNode(0, n) )
    : n;
}
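
// Concretely (x86 example): Matcher::strict_fp_requires_explicit_rounding
// is true on x86 because the x87 FPU computes with an extended exponent.
// With UseSSE <= 1, doubles live in x87 registers, so a RoundDoubleNode
// must chop the result back to IEEE double format; with UseSSE >= 2 the
// value is computed in XMM registers, already has the right format, and
// the rounding node is elided.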

//=============================================================================
// Generate a fast path/slow path idiom.  Graph looks like:
// [foo] indicates that 'foo' is a parameter
//
//              [in]     NULL
//                 \    /
//                  CmpP
//                  Bool ne
//                   If
//                  /  \
//              True    False-<2>
//              / |
//             /  cast_not_null
//           Load  |    |   ^
//        [fast_test]   |   |
// gvn to   opt_test    |   |
//          /    \      |  <1>
//      True     False  |
//        |         \\  |
//   [slow_call]     \[fast_result]
//    Ctl   Val       \      \
//     |               \      \
//    Catch       <1>   \      \
//   /    \        ^     \      \
//  Ex    No_Ex    |      \      \
//  |       \   \  |       \ <2>  \
//  ...      \  [slow_res] |  |    \   [null_result]
//            \         \--+--+---  |  |
//             \           | /    \ | /
//              --------Region     Phi
//
//=============================================================================
// Code is structured as a series of driver functions all called 'do_XXX' that
// call a set of helper functions.  Helper functions first, then drivers.

//------------------------------null_check_oop---------------------------------
// Null check oop.  Set null-path control into Region in slot 3.
// Make a cast-not-null use the other not-null control.  Return cast.
Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
                               bool never_see_null, bool safe_for_replace) {
  // Initial NULL check taken path
  (*null_control) = top();
  Node* cast = null_check_common(value, T_OBJECT, false, null_control);

  // Generate uncommon_trap:
  if (never_see_null && (*null_control) != top()) {
    // If we see an unexpected null at a check-cast we record it and force a
    // recompile; the offending check-cast will be compiled to handle NULLs.
    // If we see more than one offending BCI, then all checkcasts in the
    // method will be compiled to handle NULLs.
    PreserveJVMState pjvms(this);
    set_control(*null_control);
    replace_in_map(value, null());
    uncommon_trap(Deoptimization::Reason_null_check,
                  Deoptimization::Action_make_not_entrant);
    (*null_control) = top();    // NULL path is dead
  }
  if ((*null_control) == top() && safe_for_replace) {
    replace_in_map(value, cast);
  }

  // Cast away null-ness on the result
  return cast;
}

//------------------------------opt_iff----------------------------------------
// Optimize the fast-check IfNode.  Set the fast-path region slot 2.
// Return slow-path control.
Node* GraphKit::opt_iff(Node* region, Node* iff) {
  IfNode *opt_iff = _gvn.transform(iff)->as_If();

  // Fast path taken; set region slot 2
  Node *fast_taken = _gvn.transform( new (C) IfFalseNode(opt_iff) );
  region->init_req(2,fast_taken); // Capture fast-control

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = _gvn.transform( new (C) IfTrueNode(opt_iff) );
  return slow_taken;
}

//-----------------------------make_runtime_call-------------------------------
Node* GraphKit::make_runtime_call(int flags,
                                  const TypeFunc* call_type, address call_addr,
                                  const char* call_name,
                                  const TypePtr* adr_type,
                                  // The following parms are all optional.
                                  // The first NULL ends the list.
                                  Node* parm0, Node* parm1,
                                  Node* parm2, Node* parm3,
                                  Node* parm4, Node* parm5,
                                  Node* parm6, Node* parm7) {
  // Slow-path call
  bool is_leaf = !(flags & RC_NO_LEAF);
  bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
  if (call_name == NULL) {
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
    call = new(C) CallStaticJavaNode(call_type, call_addr, call_name,
                                           bci(), adr_type);
  } else if (flags & RC_NO_FP) {
    call = new(C) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else {
    call = new(C) CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.

  // Slow path call has no side-effects, uses few values
  bool wide_in  = !(flags & RC_NARROW_MEM);
  bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);

  Node* prev_mem = NULL;
  if (wide_in) {
    prev_mem = set_predefined_input_for_runtime_call(call);
  } else {
    assert(!wide_out, "narrow in => narrow out");
    Node* narrow_mem = memory(adr_type);
    prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
  }

  // Hook each parm in order.  Stop looking at the first NULL.
  if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
    /* close each nested if ===> */  } } } } } } } }
  assert(call->in(call->req()-1) != NULL, "must initialize all parms");

  if (!is_leaf) {
    // Non-leaves can block and take safepoints:
    add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
  }
  // Non-leaves can throw exceptions:
  if (has_io) {
    call->set_req(TypeFunc::I_O, i_o());
  }

  if (flags & RC_UNCOMMON) {
    // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
    // (An "if" probability corresponds roughly to an unconditional count.
    // Sort of.)
    call->set_cnt(PROB_UNLIKELY_MAG(4));
  }

  Node* c = _gvn.transform(call);
  assert(c == call, "cannot disappear");

  if (wide_out) {
    // Slow path call has full side-effects.
    set_predefined_output_for_runtime_call(call);
  } else {
    // Slow path call has few side-effects, and/or sets few values.
    set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
  }

  if (has_io) {
    set_i_o(_gvn.transform(new (C) ProjNode(call, TypeFunc::I_O)));
  }
  return call;

}
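
// Illustrative sketch (the stub and its Type function are assumed names,
// not real entry points): a leaf call touching only raw memory might be
// issued as
//
//   Node* call = make_runtime_call(RC_LEAF | RC_NO_FP,
//                                  OptoRuntime::example_leaf_Type(),        // assumed
//                                  CAST_FROM_FN_PTR(address, example_leaf), // assumed
//                                  "example_leaf",
//                                  TypeRawPtr::BOTTOM,  // adr_type
//                                  parm0, parm1);
//
// Passing RC_NARROW_MEM instead would thread only the memory(adr_type)
// slice through the call, per the wide_in/wide_out logic above.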

//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
  for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
    Node* old_slice = mms.force_memory();
    Node* new_slice = mms.memory2();
    if (old_slice != new_slice) {
      PhiNode* phi;
      if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
        if (mms.is_empty()) {
          // clone base memory Phi's inputs for this memory slice
          assert(old_slice == mms.base_memory(), "sanity");
          phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
          _gvn.set_type(phi, Type::MEMORY);
          for (uint i = 1; i < phi->req(); i++) {
            phi->init_req(i, old_slice->in(i));
          }
        } else {
          phi = old_slice->as_Phi(); // Phi was generated already
        }
      } else {
        phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
        _gvn.set_type(phi, Type::MEMORY);
      }
      phi->set_req(new_path, new_slice);
      mms.set_memory(phi);
    }
  }
}
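
// Example: merging a fast path (path 1) with a slow call's memory at
// new_path == 2.  For each alias slice where the two states differ, a
// memory Phi on 'region' is created (or an existing one is reused) with
// in(1) = old_slice and in(2) = new_slice, so loads below the region
// see the per-slice merged state.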

//------------------------------make_slow_call_ex------------------------------
// Make the exception handler hookups for the slow call
void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {
  if (stopped())  return;

  // Make a catch node with just two handlers:  fall-through and catch-all
  Node* i_o  = _gvn.transform( new (C) ProjNode(call, TypeFunc::I_O, separate_io_proj) );
  Node* catc = _gvn.transform( new (C) CatchNode(control(), i_o, 2) );
  Node* norm = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
  Node* excp = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci) );

  { PreserveJVMState pjvms(this);
    set_control(excp);
    set_i_o(i_o);

    if (excp != top()) {
      if (deoptimize) {
        // Deoptimize if an exception is caught. Don't construct exception state in this case.
        uncommon_trap(Deoptimization::Reason_unhandled,
                      Deoptimization::Action_none);
      } else {
        // Create an exception state also.
        // Use an exact type if the caller has specified a specific exception.
        const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
        Node*       ex_oop  = new (C) CreateExNode(ex_type, control(), i_o);
        add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
      }
    }
  }

  // Get the no-exception control from the CatchNode.
  set_control(norm);
}


//-------------------------------gen_subtype_check-----------------------------
// Generate a subtyping check.  Takes as input the subtype and supertype.
// Returns 2 values: sets the default control() to the true path and returns
// the false path.  Only reads invariant memory; sets no (visible) memory.
// The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
// but that's not exposed to the optimizer.  This call also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's class
// prior to coming here.
Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) {
  // Fast check for identical types, perhaps identical constants.
  // The types can even be identical non-constants, in cases
  // involving Array.newInstance, Object.clone, etc.
  if (subklass == superklass)
    return top();             // false path is dead; no test needed.

  if (_gvn.type(superklass)->singleton()) {
    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
    ciKlass* subk   = _gvn.type(subklass)->is_klassptr()->klass();

    // In the common case of an exact superklass, try to fold up the
    // test before generating code.  You may ask, why not just generate
    // the code and then let it fold up?  The answer is that the generated
    // code will necessarily include null checks, which do not always
    // completely fold away.  If they are also needless, then they turn
    // into a performance loss.  Example:
    //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
    // Here, the type of 'fa' is often exact, so the store check
    // of fa[1]=x will fold up, without testing the nullness of x.
    switch (static_subtype_check(superk, subk)) {
    case SSC_always_false:
      {
        Node* always_fail = control();
        set_control(top());
        return always_fail;
      }
    case SSC_always_true:
      return top();
    case SSC_easy_test:
      {
        // Just do a direct pointer compare and be done.
        Node* cmp = _gvn.transform( new(C) CmpPNode(subklass, superklass) );
        Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
        IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
        set_control( _gvn.transform( new(C) IfTrueNode (iff) ) );
        return       _gvn.transform( new(C) IfFalseNode(iff) );
      }
    case SSC_full_test:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // %%% Possible further optimization:  Even if the superklass is not exact,
  // if the subklass is the unique subtype of the superklass, the check
  // will always succeed.  We could leave a dependency behind to ensure this.

  // First load the super-klass's check-offset
  Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
  Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
                                                   TypeInt::INT, MemNode::unordered));
  int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
  bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
  // the secondary superclass list, or a failing value with a sentinel offset
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // Worst-case type is a little odd: NULL is allowed as a result (usually
  // klass loads can never produce a NULL).
  Node *chk_off_X = ConvI2X(chk_off);
  Node *p2 = _gvn.transform( new (C) AddPNode(subklass,subklass,chk_off_X) );
  // For some types like interfaces the following loadKlass is from a 1-word
  // cache which is mutable so can't use immutable memory.  Other
  // types load from the super-class display table which is immutable.
  Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
  Node* nkls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));

  // Compile speed common case: ARE a subtype and we canNOT fail
  if( superklass == nkls )
    return top();             // false path is dead; no test needed.

  // See if we get an immediate positive hit.  Happens roughly 83% of the
  // time.  Test to see if the value loaded just previously from the subklass
  // is exactly the superklass.
  Node *cmp1 = _gvn.transform( new (C) CmpPNode( superklass, nkls ) );
  Node *bol1 = _gvn.transform( new (C) BoolNode( cmp1, BoolTest::eq ) );
  IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN );
  Node *iftrue1 = _gvn.transform( new (C) IfTrueNode ( iff1 ) );
  set_control(    _gvn.transform( new (C) IfFalseNode( iff1 ) ) );

  // Compile speed common case: Check for being deterministic right now.  If
  // chk_off is a constant and not equal to cacheoff then we are NOT a
  // subklass.  In this case we need exactly the 1 test above and we can
  // return those results immediately.
  if (!might_be_cache) {
    Node* not_subtype_ctrl = control();
    set_control(iftrue1); // We need exactly the 1 test above
    return not_subtype_ctrl;
  }

  // Gather the various success & failures here
  RegionNode *r_ok_subtype = new (C) RegionNode(4);
  record_for_igvn(r_ok_subtype);
  RegionNode *r_not_subtype = new (C) RegionNode(3);
  record_for_igvn(r_not_subtype);

  r_ok_subtype->init_req(1, iftrue1);

  // Check for immediate negative hit.  Happens roughly 11% of the time (which
  // is roughly 63% of the remaining cases).  Test to see if the loaded
  // check-offset points into the subklass display list or the 1-element
  // cache.  If it points to the display (and NOT the cache) and the display
  // missed then it's not a subtype.
  Node *cacheoff = _gvn.intcon(cacheoff_con);
  Node *cmp2 = _gvn.transform( new (C) CmpINode( chk_off, cacheoff ) );
  Node *bol2 = _gvn.transform( new (C) BoolNode( cmp2, BoolTest::ne ) );
  IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN );
  r_not_subtype->init_req(1, _gvn.transform( new (C) IfTrueNode (iff2) ) );
  set_control(                _gvn.transform( new (C) IfFalseNode(iff2) ) );

  // Check for self.  Very rare to get here, but it is taken 1/3 the time.
  // No performance impact (too rare) but allows sharing of secondary arrays
  // which has some footprint reduction.
  Node *cmp3 = _gvn.transform( new (C) CmpPNode( subklass, superklass ) );
  Node *bol3 = _gvn.transform( new (C) BoolNode( cmp3, BoolTest::eq ) );
  IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN );
  r_ok_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode ( iff3 ) ) );
  set_control(               _gvn.transform( new (C) IfFalseNode( iff3 ) ) );

  // -- Roads not taken here: --
  // We could also have chosen to perform the self-check at the beginning
  // of this code sequence, as the assembler does.  This would not pay off
  // the same way, since the optimizer, unlike the assembler, can perform
  // static type analysis to fold away many successful self-checks.
  // Non-foldable self checks work better here in second position, because
  // the initial primary superclass check subsumes a self-check for most
  // types.  An exception would be a secondary type like array-of-interface,
  // which does not appear in its own primary supertype display.
  // Finally, we could have chosen to move the self-check into the
  // PartialSubtypeCheckNode, and from there out-of-line in a platform
  // dependent manner.  But it is worthwhile to have the check here,
  // where it can perhaps be optimized.  The cost in code space is
  // small (register compare, branch).

  // Now do a linear scan of the secondary super-klass array.  Again, no real
  // performance impact (too rare) but it's gotta be done.
  // Since the code is rarely used, there is no penalty for moving it
  // out of line, and it can only improve I-cache density.
  // The decision to inline or out-of-line this final check is platform
  // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  Node* psc = _gvn.transform(
    new (C) PartialSubtypeCheckNode(control(), subklass, superklass) );

  Node *cmp4 = _gvn.transform( new (C) CmpPNode( psc, null() ) );
  Node *bol4 = _gvn.transform( new (C) BoolNode( cmp4, BoolTest::ne ) );
  IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN );
  r_not_subtype->init_req(2, _gvn.transform( new (C) IfTrueNode (iff4) ) );
  r_ok_subtype ->init_req(3, _gvn.transform( new (C) IfFalseNode(iff4) ) );

  // Return false path; set default control to true path.
  set_control( _gvn.transform(r_ok_subtype) );
  return _gvn.transform(r_not_subtype);
}

//----------------------------static_subtype_check-----------------------------
// Shortcut important common cases when superklass is exact:
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) {
  if (StressReflectiveCode) {
    return SSC_full_test;       // Let caller generate the general case.
  }

  if (superk == env()->Object_klass()) {
    return SSC_always_true;     // (0) this test cannot fail
  }

  ciType* superelem = superk;
  if (superelem->is_array_klass())
    superelem = superelem->as_array_klass()->base_element_type();

  if (!subk->is_interface()) {  // cannot trust static interface types yet
    if (subk->is_subtype_of(superk)) {
      return SSC_always_true;   // (1) false path dead; no dynamic test needed
    }
    if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
        !superk->is_subtype_of(subk)) {
      return SSC_always_false;
    }
  }

  // If casting to an instance klass, it must have no subtypes
  if (superk->is_interface()) {
    // Cannot trust interfaces yet.
    // %%% S.B. superk->nof_implementors() == 1
  } else if (superelem->is_instance_klass()) {
    ciInstanceKlass* ik = superelem->as_instance_klass();
    if (!ik->has_subklass() && !ik->is_interface()) {
      if (!ik->is_final()) {
        // Add a dependency if there is a chance of a later subclass.
        C->dependencies()->assert_leaf_type(ik);
      }
      return SSC_easy_test;     // (3) caller can do a simple ptr comparison
    }
  } else {
    // A primitive array type has no subtypes.
    return SSC_easy_test;       // (3) caller can do a simple ptr comparison
  }

  return SSC_full_test;
}
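
// Examples of the shortcuts above (illustrative class names):
//   (0) superk == java.lang.Object                      => SSC_always_true
//   (1) subk == String, superk == CharSequence          => SSC_always_true
//   (2) subk == Integer, superk == String               => SSC_always_false
//   (3) superk == String (a leaf class, no subklasses)  => SSC_easy_test
// When no shortcut applies (e.g. a non-leaf, non-interface superk whose
// hierarchy is still open), SSC_full_test is returned and the caller
// emits the dynamic check.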

// Profile-driven exact type check:
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
                                    float prob,
                                    Node* *casted_receiver) {
  const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
  Node* recv_klass = load_object_klass(receiver);
  Node* want_klass = makecon(tklass);
  Node* cmp = _gvn.transform( new(C) CmpPNode(recv_klass, want_klass) );
  Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) );
  IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
  set_control( _gvn.transform( new(C) IfTrueNode (iff) ));
  Node* fail = _gvn.transform( new(C) IfFalseNode(iff) );

  const TypeOopPtr* recv_xtype = tklass->as_instance_type();
  assert(recv_xtype->klass_is_exact(), "");

  // Subsume downstream occurrences of receiver with a cast to
  // recv_xtype, since now we know what the type will be.
  Node* cast = new(C) CheckCastPPNode(control(), receiver, recv_xtype);
  (*casted_receiver) = _gvn.transform(cast);
  // (User must make the replace_in_map call.)

  return fail;
}


//------------------------------seems_never_null-------------------------------
// Use null_seen information if it is available from the profile.
// If we see an unexpected null at a type check we record it and force a
// recompile; the offending check will be recompiled to handle NULLs.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
  if (UncommonNullCast               // Cutout for this technique
      && obj != null()               // And not the -Xcomp stupid case?
      && !too_many_traps(Deoptimization::Reason_null_check)
      ) {
    if (data == NULL)
      // Edge case:  no mature data.  Be optimistic here.
      return true;
    // If the profile has not seen a null, assume it won't happen.
    assert(java_bc() == Bytecodes::_checkcast ||
           java_bc() == Bytecodes::_instanceof ||
           java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
    return !data->as_BitData()->null_seen();
  }
  return false;
}

//------------------------maybe_cast_profiled_receiver-------------------------
// If the profile has seen exactly one type, narrow to exactly that type.
// Subsequent type checks will always fold up.
Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                             ciKlass* require_klass,
                                             ciKlass* spec_klass,
                                             bool safe_for_replace) {
  if (!UseTypeProfile || !TypeProfileCasts) return NULL;

  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;

  // Make sure we haven't already deoptimized from this tactic.
  if (too_many_traps(reason) || too_many_recompiles(reason))
    return NULL;

  // (No, this isn't a call, but it's enough like a virtual call
  // to use the same ciMethod accessor to get the profile info...)
  // If we have a speculative type use it instead of profiling (which
  // may not help us)
  ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
  if (exact_kls != NULL) { // no cast failures here
    if (require_klass == NULL ||
        static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
      // If we narrow the type to match what the type profile sees or
      // the speculative type, we can then remove the rest of the
      // cast.
      // This is a win, even if the exact_kls is very specific,
      // because downstream operations, such as method calls,
      // will often benefit from the sharper type.
      Node* exact_obj = not_null_obj; // will get updated in place...
      Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
                                            &exact_obj);
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      if (safe_for_replace) {
        replace_in_map(not_null_obj, exact_obj);
      }
      return exact_obj;
    }
    // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
  }

  return NULL;
}

/**
 * Cast obj to type and emit guard unless we had too many traps here
 * already
 *
 * @param obj       node being cast
 * @param type      type to cast the node to
 * @param not_null  true if we know node cannot be null
 */
Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
                                        ciKlass* type,
                                        bool not_null) {
  // type == NULL if profiling tells us this object is always null
  if (type != NULL) {
    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
    if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) &&
        !too_many_traps(class_reason) && !too_many_recompiles(class_reason)) {
      Node* not_null_obj = NULL;
      // not_null is true if we know the object is not null and
      // there's no need for a null check
      if (!not_null) {
        Node* null_ctl = top();
        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
        assert(null_ctl->is_top(), "no null control here");
      } else {
        not_null_obj = obj;
      }

      Node* exact_obj = not_null_obj;
      ciKlass* exact_kls = type;
      Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
                                            &exact_obj);
      {
        PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
      }
      replace_in_map(not_null_obj, exact_obj);
      obj = exact_obj;
    }
  } else {
    if (!too_many_traps(Deoptimization::Reason_null_assert) &&
        !too_many_recompiles(Deoptimization::Reason_null_assert)) {
      Node* exact_obj = null_assert(obj);
      replace_in_map(obj, exact_obj);
      obj = exact_obj;
    }
  }
  return obj;
}

//-------------------------------gen_instanceof--------------------------------
// Generate an instance-of idiom.  Used by both the instance-of bytecode
// and the reflective instance-of call.
Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
  kill_dead_locals();           // Benefit all the uncommon traps
  assert( !stopped(), "dead parse path should be checked in callers" );
  assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
         "must check for not-null not-dead klass in callers");

  // Make the merge point
  enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
  RegionNode* region = new(C) RegionNode(PATH_LIMIT);
  Node*       phi    = new(C) PhiNode(region, TypeInt::BOOL);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  ciProfileData* data = NULL;
  if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
    data = method()->method_data()->bci_to_data(bci());
  }
  bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                         && seems_never_null(obj, data));

  // Null check; get the cast pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a NULL?
    set_control(null_ctl);
    return intcon(0);
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, intcon(0)); // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  // Do we know the type check always succeeds?
  bool known_statically = false;
  if (_gvn.type(superklass)->singleton()) {
    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
    ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
    if (subk != NULL && subk->is_loaded()) {
      int static_res = static_subtype_check(superk, subk);
      known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
    }
  }

  if (known_statically && UseTypeSpeculation) {
    // If we know the type check always succeeds then we don't use the
    // profiling data at this bytecode. Don't lose it, feed it to the
    // type system as a speculative type.
    not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
  } else {
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we
    // have a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
      if (stopped()) {            // Profile disagrees with this path.
        set_control(null_ctl);    // Null is the only remaining possibility.
        return intcon(0);
      }
      if (cast_obj != NULL) {
        not_null_obj = cast_obj;
      }
    }
  }

  // Load the object's klass
  Node* obj_klass = load_object_klass(not_null_obj);

  // Generate the subtype check
  Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);

  // Plug in the success path to the general merge in slot 1.
  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, intcon(1));

  // Plug in the failing path to the general merge in slot 2.
  region->init_req(_fail_path, not_subtype_ctrl);
  phi   ->init_req(_fail_path, intcon(0));

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);
  return _gvn.transform(phi);
}

//-------------------------------gen_checkcast---------------------------------
// Generate a checkcast idiom.  Used by both the checkcast bytecode and the
// array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work.  Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure.  Otherwise, an appropriate
// uncommon trap or exception is thrown.
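// For example, for a Java-level cast such as
//   String s = (String) x;
// a null x passes the cast and takes the null path, while a non-null x of an
// unrelated type reaches builtin_throw(Reason_class_check) below, unless the
// caller captures that edge through failure_control.  (Illustrative summary
// of the paths constructed by this routine.)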
Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
                              Node* *failure_control) {
  kill_dead_locals();           // Benefit all the uncommon traps
  const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
  const Type *toop = TypeOopPtr::make_from_klass(tk->klass());

  // Fast cutout:  Check the case that the cast is vacuously true.
  // This detects the common cases where the test will short-circuit
  // away completely.  We do this before we perform the null check,
  // because if the test is going to turn into zero code, we don't
  // want a residual null check left around.  (Causes a slowdown,
  // for example, in some objArray manipulations, such as a[i]=a[j].)
  if (tk->singleton()) {
    const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
    if (objtp != NULL && objtp->klass() != NULL) {
      switch (static_subtype_check(tk->klass(), objtp->klass())) {
      case SSC_always_true:
        // If we know the type check always succeeds then we don't use
        // the profiling data at this bytecode. Don't lose it, feed it
        // to the type system as a speculative type.
        return record_profiled_receiver_for_speculation(obj);
      case SSC_always_false:
        // It needs a null check because a null will *pass* the cast check.
        // A non-null value will always produce an exception.
        return null_assert(obj);
      }
    }
  }

  ciProfileData* data = NULL;
  bool safe_for_replace = false;
  if (failure_control == NULL) {        // use MDO in regular case only
    assert(java_bc() == Bytecodes::_aastore ||
           java_bc() == Bytecodes::_checkcast,
           "interpreter profiles type checks only for these BCs");
    data = method()->method_data()->bci_to_data(bci());
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  Node*       phi    = new (C) PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool never_see_null = ((failure_control == NULL)  // regular case only
                         && seems_never_null(obj, data));

  // Null check; get cast pointer; set region slot 2 (_null_path)
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing checkcast on a NULL?
    set_control(null_ctl);
    return null();
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, null());  // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  Node* cast_obj = NULL;
  if (tk->klass_is_exact()) {
    // The following optimization tries to statically cast the speculative type of the object
    // (for example obtained during profiling) to the type of the superklass and then do a
    // dynamic check that the type of the object is what we expect. To work correctly
    // for checkcast and aastore the type of superklass should be exact.
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we have
    // a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != NULL ||
        (data != NULL &&
         // Counter has never been decremented (due to cast failure).
         // ...This is a reasonable thing to expect.  It is true of
         // all casts inserted by javac to implement generic types.
         data->as_CounterData()->count() >= 0)) {
      cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
      if (cast_obj != NULL) {
        if (failure_control != NULL) // failure is now impossible
          (*failure_control) = top();
        // adjust the type of the phi to the exact klass:
        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
      }
    }
  }

  if (cast_obj == NULL) {
    // Load the object's klass
    Node* obj_klass = load_object_klass(not_null_obj);

    // Generate the subtype check
    Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );

    // Plug in success path into the merge
    cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(),
                                                         not_null_obj, toop));
    // Failure path ends in uncommon trap (or may be dead - failure impossible)
    if (failure_control == NULL) {
      if (not_subtype_ctrl != top()) { // If failure is possible
        PreserveJVMState pjvms(this);
        set_control(not_subtype_ctrl);
        builtin_throw(Deoptimization::Reason_class_check, obj_klass);
      }
    } else {
      (*failure_control) = not_subtype_ctrl;
    }
  }

  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, cast_obj);

  // A merge of NULL or Cast-NotNull obj
  Node* res = _gvn.transform(phi);

  // Note I do NOT always 'replace_in_map(obj,result)' here.
  //  if( tk->klass()->can_be_primary_super()  )
    // This means that if I successfully store an Object into an array-of-String
    // I 'forget' that the Object is really now known to be a String.  I have to
    // do this because we don't have true union types for interfaces - if I store
    // a Baz into an array-of-Interface and then tell the optimizer it's an
    // Interface, I forget that it's also a Baz and cannot do Baz-like field
    // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
  //  replace_in_map( obj, res );

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);
  return res;
}

//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
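// Worked example (assuming C->sync_stack_slots() == 1): at monitor_depth 2
// this returns slot 2 and raises the fixed_slots high-water mark to at
// least 3, reserving one fixed stack slot per nested monitor.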
int GraphKit::next_monitor() {
  int current = jvms()->monitor_depth()* C->sync_stack_slots();
  int next = current + C->sync_stack_slots();
  // Keep the toplevel high water mark current:
  if (C->fixed_slots() < next)  C->set_fixed_slots(next);
  return current;
}

//------------------------------insert_mem_bar---------------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and all memory slices.
Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, control());
  mb->init_req(TypeFunc::Memory,  reset_memory());
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
  set_all_memory_call(membar);
  return membar;
}

//-------------------------insert_mem_bar_volatile----------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and memory(alias_idx).
// If you want to make a pinch point on all memory slices, do not use this
// function (even with AliasIdxBot); use insert_mem_bar() instead.
Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
  // When Parse::do_put_xxx updates a volatile field, it appends a series
  // of MemBarVolatile nodes, one for *each* volatile field alias category.
  // The first membar is on the same memory slice as the field store opcode.
  // This forces the membar to follow the store.  (Bug 6500685 broke this.)
  // All the other membars (for other volatile slices, including AliasIdxBot,
  // which stands for all unknown volatile slices) are control-dependent
  // on the first membar.  This prevents later volatile loads or stores
  // from sliding up past the just-emitted store.

  MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
  mb->set_req(TypeFunc::Control,control());
  if (alias_idx == Compile::AliasIdxBot) {
    mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
  } else {
    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
    mb->set_req(TypeFunc::Memory, memory(alias_idx));
  }
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
  if (alias_idx == Compile::AliasIdxBot) {
    merged_memory()->set_base_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)));
  } else {
    set_memory(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Memory)),alias_idx);
  }
  return membar;
}

//------------------------------shared_lock------------------------------------
// Emit locking code.
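// Schematically, the emitted shape is: a BoxLockNode naming the stack slot
// from next_monitor(), a FastLockNode for the inlined fast path, a LockNode
// (runtime call) covering the slow path, and a trailing MemBarAcquireLock so
// later memory operations cannot float above the lock acquisition.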
FastLockNode* GraphKit::shared_lock(Node* obj) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return NULL;                // Not locking things?
  if (stopped())                // Dead monitor?
    return NULL;

  assert(dead_locals_are_killed(), "should kill locals before sync. point");

  // Box the stack location
  Node* box = _gvn.transform(new (C) BoxLockNode(next_monitor()));
  Node* mem = reset_memory();

  FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
    // Create the counters for this fast lock.
    flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
  }

  // Create the rtm counters for this fast lock if needed.
  flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci

  // Add monitor to debug info for the slow path.  If we block inside the
  // slow path and de-opt, we need the monitor hanging around
  map()->push_monitor( flock );

  const TypeFunc *tf = LockNode::lock_type();
  LockNode *lock = new (C) LockNode(C, tf);

  lock->init_req( TypeFunc::Control, control() );
  lock->init_req( TypeFunc::Memory , mem );
  lock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  lock->init_req( TypeFunc::FramePtr, frameptr() );
  lock->init_req( TypeFunc::ReturnAdr, top() );

  lock->init_req(TypeFunc::Parms + 0, obj);
  lock->init_req(TypeFunc::Parms + 1, box);
  lock->init_req(TypeFunc::Parms + 2, flock);
  add_safepoint_edges(lock);

  lock = _gvn.transform( lock )->as_Lock();

  // lock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);

  insert_mem_bar(Op_MemBarAcquireLock);

  // Add this to the worklist so that the lock can be eliminated
  record_for_igvn(lock);

#ifndef PRODUCT
  if (PrintLockStatistics) {
    // Update the counter for this lock.  Don't bother using an atomic
    // operation since we don't require absolute accuracy.
    lock->create_lock_counter(map()->jvms());
    increment_counter(lock->counter()->addr());
  }
#endif

  return flock;
}


//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
void GraphKit::shared_unlock(Node* box, Node* obj) {
  // bci is either a monitorexit bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return;
  if (stopped()) {               // Dead monitor?
    map()->pop_monitor();        // Kill monitor from debug info
    return;
  }

  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
  UnlockNode *unlock = new (C) UnlockNode(C, tf);
#ifdef ASSERT
  unlock->set_dbg_jvms(sync_jvms());
#endif
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req( TypeFunc::Control, control() );
  unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  unlock->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  unlock->init_req( TypeFunc::FramePtr, frameptr() );
  unlock->init_req( TypeFunc::ReturnAdr, top() );

  unlock->init_req(TypeFunc::Parms + 0, obj);
  unlock->init_req(TypeFunc::Parms + 1, box);
  unlock = _gvn.transform(unlock)->as_Unlock();

  Node* mem = reset_memory();

  // unlock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);

  // Kill monitor from debug info
  map()->pop_monitor( );
}

//-------------------------------get_layout_helper-----------------------------
// If the given klass is a constant or known to be an array,
// fetch the constant layout helper value into constant_value
// and return (Node*)NULL.  Otherwise, load the non-constant
// layout helper value, and return the node which represents it.
// This two-faced routine is useful because allocation sites
// almost always feature constant types.
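// Typical use, mirroring the callers below:
//   jint  layout_con = Klass::_lh_neutral_value;
//   Node* layout_val = get_layout_helper(klass_node, layout_con);
//   if (layout_val == NULL) { /* layout_con holds the constant helper */ }
//   else                    { /* layout_val loads the helper at runtime */ }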
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
  const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
  if (!StressReflectiveCode && inst_klass != NULL) {
    ciKlass* klass = inst_klass->klass();
    bool    xklass = inst_klass->klass_is_exact();
    if (xklass || klass->is_array_klass()) {
      jint lhelper = klass->layout_helper();
      if (lhelper != Klass::_lh_neutral_value) {
        constant_value = lhelper;
        return (Node*) NULL;
      }
    }
  }
  constant_value = Klass::_lh_neutral_value;  // put in a known value
  Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
  return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
}

// We just put in an allocate/initialize with a big raw-memory effect.
// Hook selected additional alias categories on the initialization.
static void hook_memory_on_init(GraphKit& kit, int alias_idx,
                                MergeMemNode* init_in_merge,
                                Node* init_out_raw) {
  DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
  assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");

  Node* prevmem = kit.memory(alias_idx);
  init_in_merge->set_memory_at(alias_idx, prevmem);
  kit.set_memory(init_out_raw, alias_idx);
}

//---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
                                          const TypeOopPtr* oop_type,
                                          bool deoptimize_on_exception) {
  int rawidx = Compile::AliasIdxRaw;
  alloc->set_req( TypeFunc::FramePtr, frameptr() );
  add_safepoint_edges(alloc);
  Node* allocx = _gvn.transform(alloc);
  set_control( _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Control) ) );
  // create memory projection for i_o
  set_memory ( _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
  make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);

  // create a memory projection as for the normal control path
  Node* malloc = _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Memory));
  set_memory(malloc, rawidx);

  // a normal slow-call doesn't change i_o, but an allocation does
  // we create a separate i_o projection for the normal control path
  set_i_o(_gvn.transform( new (C) ProjNode(allocx, TypeFunc::I_O, false) ) );
  Node* rawoop = _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Parms) );

  // put in an initialization barrier
  InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                 rawoop)->as_Initialize();
  assert(alloc->initialization() == init,  "2-way macro link must work");
  assert(init ->allocation()     == alloc, "2-way macro link must work");
  {
    // Extract memory strands which may participate in the new object's
    // initialization, and source them from the new InitializeNode.
    // This will allow us to observe initializations when they occur,
    // and link them properly (as a group) to the InitializeNode.
    assert(init->in(InitializeNode::Memory) == malloc, "");
    MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
    init->set_req(InitializeNode::Memory, minit_in);
    record_for_igvn(minit_in); // fold it up later, if possible
    Node* minit_out = memory(rawidx);
    assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
    if (oop_type->isa_aryptr()) {
      const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
      int            elemidx  = C->get_alias_index(telemref);
      hook_memory_on_init(*this, elemidx, minit_in, minit_out);
    } else if (oop_type->isa_instptr()) {
      ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
      for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
        ciField* field = ik->nonstatic_field_at(i);
        if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
          continue;  // do not bother to track really large numbers of fields
        // Find (or create) the alias category for this field:
        int fieldidx = C->alias_type(field)->index();
        hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
      }
    }
  }

  // Cast raw oop to the real thing...
  Node* javaoop = new (C) CheckCastPPNode(control(), rawoop, oop_type);
  javaoop = _gvn.transform(javaoop);
  C->set_recent_alloc(control(), javaoop);
  assert(just_allocated_object(control()) == javaoop, "just allocated");

#ifdef ASSERT
  { // Verify that the AllocateNode::Ideal_allocation recognizers work:
    assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
           "Ideal_allocation works");
    assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
           "Ideal_allocation works");
    if (alloc->is_AllocateArray()) {
      assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
      assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
             "Ideal_allocation works");
    } else {
      assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
    }
  }
#endif //ASSERT

  return javaoop;
}

//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code).  It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
//  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
//  - If 'return_size_val' is not null, report the total object size to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception) {
  // Compute size in doublewords
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
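  // (For a plain instance the layout_helper is the object size in bytes,
  // with the low _lh_instance_slow_path_bit set when allocation must enter
  // the VM; Klass::layout_helper_size_in_bytes() masks that bit back off.)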
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == NULL);

  if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
  // Generate the initial go-slow test.  It's either ALWAYS (return a
  // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
  // case) a computed value derived from the layout_helper.
  Node* initial_slow_test = NULL;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
    initial_slow_test = must_go_slow? intcon(1): extra_slow_test;

  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM if there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
    initial_slow_test = _gvn.transform( new (C) AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
      initial_slow_test = _gvn.transform( new (C) OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
  }

  // Find the size in bytes.  This is easy; it's the layout_helper.
  // The size value must be valid even if the slow path is taken.
  Node* size = NULL;
  if (layout_is_con) {
    size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
  } else {   // reflective case
    // This reflective path is used by clone and Unsafe.allocateInstance.
    size = ConvI2X(layout_val);

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
    size = _gvn.transform( new (C) AndXNode(size, mask) );
  }
  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }

  // This is a precise notnull oop of the klass.
  // (Actually, it need not be precise if this is a reflective allocation.)
  // It's what we cast the result to.
  const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
  if (!tklass)  tklass = TypeKlassPtr::OBJECT;
  const TypeOopPtr* oop_type = tklass->as_instance_type();

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation,
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  AllocateNode* alloc
    = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                           control(), mem, i_o(),
                           size, klass_node,
                           initial_slow_test);

  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}

//-------------------------------new_array-------------------------------------
// helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// See comments on new_instance for the meaning of the other arguments.
Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                          Node* length,         // number of array elements
                          int   nargs,          // number of arguments to push back for uncommon trap
                          Node* *return_size_val,
                          bool deoptimize_on_exception) {
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == NULL);

  if (!layout_is_con && !StressReflectiveCode &&
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
    Node* cmp_lh = _gvn.transform( new(C) CmpINode(layout_val, intcon(layout_con)) );
    Node* bol_lh = _gvn.transform( new(C) BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
    layout_val = NULL;
    layout_is_con = true;
  }

  // Generate the initial go-slow test.  Make sure we do not overflow
  // if length is huge (near 2Gig) or negative!  We do not need
  // exact double-words here, just a close approximation of needed
  // double-words.  We can't add any offset or rounding bits, lest we
  // take a size -1 of bytes and make it positive.  Use an unsigned
  // compare, so negative sizes look hugely positive.
  int fast_size_limit = FastAllocateSizeLimit;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= (LogBytesPerLong - log2_esize);
  }

  Node* initial_slow_cmp  = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
  Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );

  // --- Size Computation ---
  // array_size = round_to_heap(array_header + (length << elem_shift));
  // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
  // and round_to(x, y) == ((x + y-1) & ~(y-1))
  // The rounding mask is strength-reduced, if possible.
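  // Worked example (assuming 8-byte heap alignment, so round_mask == 7):
  // a byte array of length 5 with a 16-byte header yields
  //   ((16 + 7) + 5) & ~7 == 24 bytes,
  // since round_mask is folded into header_size up front and the final
  // AndX below rounds the sum back down.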
  int round_mask = MinObjAlignmentInBytes - 1;
  Node* header_size = NULL;
  int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
  // (T_BYTE has the weakest alignment and size restrictions...)
  if (layout_is_con) {
    int       hsize  = Klass::layout_helper_header_size(layout_con);
    int       eshift = Klass::layout_helper_log2_element_size(layout_con);
    BasicType etype  = Klass::layout_helper_element_type(layout_con);
    if ((round_mask & ~right_n_bits(eshift)) == 0)
      round_mask = 0;  // strength-reduce it if it goes away completely
    assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
    assert(header_size_min <= hsize, "generic minimum is smallest");
    header_size_min = hsize;
    header_size = intcon(hsize + round_mask);
  } else {
    Node* hss   = intcon(Klass::_lh_header_size_shift);
    Node* hsm   = intcon(Klass::_lh_header_size_mask);
    Node* hsize = _gvn.transform( new(C) URShiftINode(layout_val, hss) );
    hsize       = _gvn.transform( new(C) AndINode(hsize, hsm) );
    Node* mask  = intcon(round_mask);
    header_size = _gvn.transform( new(C) AddINode(hsize, mask) );
  }

  Node* elem_shift = NULL;
  if (layout_is_con) {
    int eshift = Klass::layout_helper_log2_element_size(layout_con);
    if (eshift != 0)
      elem_shift = intcon(eshift);
  } else {
    // There is no need to mask or shift this value.
    // The semantics of LShiftINode include an implicit mask to 0x1F.
    assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
    elem_shift = layout_val;
  }

  // Transition to native address size for all offset calculations:
  Node* lengthx = ConvI2X(length);
  Node* headerx = ConvI2X(header_size);
#ifdef _LP64
  { const TypeInt* tilen = _gvn.find_int_type(length);
    if (tilen != NULL && tilen->_lo < 0) {
      // Add a manual constraint to a positive range.  Cf. array_element_address.
      jlong size_max = fast_size_limit;
      if (size_max > tilen->_hi)  size_max = tilen->_hi;
      const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);

      // Only do a narrow I2L conversion if the range check passed.
      IfNode* iff = new (C) IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
      _gvn.transform(iff);
      RegionNode* region = new (C) RegionNode(3);
      _gvn.set_type(region, Type::CONTROL);
      lengthx = new (C) PhiNode(region, TypeLong::LONG);
      _gvn.set_type(lengthx, TypeLong::LONG);

      // Range check passed. Use ConvI2L node with narrow type.
      Node* passed = IfFalse(iff);
      region->init_req(1, passed);
      // Make I2L conversion control dependent to prevent it from
      // floating above the range check during loop optimizations.
      lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));

      // Range check failed. Use ConvI2L with wide type because length may be invalid.
      region->init_req(2, IfTrue(iff));
      lengthx->init_req(2, ConvI2X(length));

      set_control(region);
      record_for_igvn(region);
      record_for_igvn(lengthx);
    }
  }
#endif

  // Combine header size (plus rounding) and body size.  Then round down.
  // This computation cannot overflow, because it is used only in two
  // places, one where the length is sharply limited, and the other
  // after a successful allocation.
  Node* abody = lengthx;
  if (elem_shift != NULL)
    abody     = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) );
  Node* size  = _gvn.transform( new(C) AddXNode(headerx, abody) );
  if (round_mask != 0) {
    Node* mask = MakeConX(~round_mask);
    size       = _gvn.transform( new(C) AndXNode(size, mask) );
  }
  // else if round_mask == 0, the size computation is self-rounding

  if (return_size_val != NULL) {
    // This is the size
    (*return_size_val) = size;
  }

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation,
  // since GC and deoptimization can happen.
  Node *mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc
    = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                                control(), mem, i_o(),
                                size, klass_node,
                                initial_slow_test,
                                length);

  // Cast to correct type.  Note that the klass_node may be constant or not,
  // and in the latter case the actual array type will be inexact also.
  // (This happens via a non-constant argument to inline_native_newArray.)
  // In any case, the value of klass_node provides the desired array type.
  const TypeInt* length_type = _gvn.find_int_type(length);
  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
  if (ary_type->isa_aryptr() && length_type != NULL) {
    // Try to get a better type than POS for the size
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }

  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);

  // Cast length on remaining path to be as narrow as possible
  if (map()->find_edge(length) >= 0) {
    Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
    if (ccast != length) {
      _gvn.set_type_bottom(ccast);
      record_for_igvn(ccast);
      replace_in_map(length, ccast);
    }
  }

  return javaoop;
}

// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.

//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) {     // reduce dumb test in callers
    return NULL;
  }
  if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
    ptr = ptr->in(1);
    if (ptr == NULL) return NULL;
  }
  // Return NULL for allocations with several casts:
  //   j.l.reflect.Array.newInstance(jobject, jint)
  //   Object.clone()
  // to keep the more precise type from the last cast.
  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  // Report failure to match.
  return NULL;
}

// Fancy version which also strips off an offset (and reports it to caller).
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                             intptr_t& offset) {
  Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
  if (base == NULL)  return NULL;
  return Ideal_allocation(base, phase);
}

// Trace Initialize <- Proj[Parm] <- Allocate
AllocateNode* InitializeNode::allocation() {
  Node* rawoop = in(InitializeNode::RawAddress);
  if (rawoop->is_Proj()) {
    Node* alloc = rawoop->in(0);
    if (alloc->is_Allocate()) {
      return alloc->as_Allocate();
    }
  }
  return NULL;
}

// Trace Allocate -> Proj[Parm] -> Initialize
InitializeNode* AllocateNode::initialization() {
  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  if (rawoop == NULL)  return NULL;
  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
    Node* init = rawoop->fast_out(i);
    if (init->is_Initialize()) {
      assert(init->as_Initialize()->allocation() == this, "2-way link");
      return init->as_Initialize();
    }
  }
  return NULL;
}

//----------------------------- loop predicates ---------------------------

//------------------------------add_predicate_impl----------------------------
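// Schematically, the predicate skeleton emitted here is:
//   if (Conv2B(Opaque1(1)))  continue;
//   else                     uncommon_trap(reason);
// Loop optimizations later find the Opaque1 (registered via
// add_predicate_opaq) and can materialize hoisted checks in its place.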
void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
  // Too many traps seen?
  if (too_many_traps(reason)) {
#ifdef ASSERT
    if (TraceLoopPredicate) {
      int tc = C->trap_count(reason);
      tty->print("too many traps=%s tcount=%d in ",
                    Deoptimization::trap_reason_name(reason), tc);
      method()->print(); // which method has too many predicate traps
      tty->cr();
    }
#endif
    // We cannot afford to take more traps here,
    // so do not generate the predicate.
    return;
  }

  Node *cont    = _gvn.intcon(1);
  Node* opq     = _gvn.transform(new (C) Opaque1Node(C, cont));
  Node *bol     = _gvn.transform(new (C) Conv2BNode(opq));
  IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
  Node* iffalse = _gvn.transform(new (C) IfFalseNode(iff));
  C->add_predicate_opaq(opq);
  {
    PreserveJVMState pjvms(this);
    set_control(iffalse);
    inc_sp(nargs);
    uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
  }
  Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff));
  set_control(iftrue);
}

//------------------------------add_predicate---------------------------------
void GraphKit::add_predicate(int nargs) {
  if (UseLoopPredicate) {
    add_predicate_impl(Deoptimization::Reason_predicate, nargs);
  }
  // loop's limit check predicate should be near the loop.
  if (LoopLimitCheck) {
    add_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
  }
}

//----------------------------- store barriers ----------------------------
#define __ ideal.

void GraphKit::sync_kit(IdealKit& ideal) {
  set_all_memory(__ merged_memory());
  set_i_o(__ i_o());
  set_control(__ ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and GraphKit.
  sync_kit(ideal);
}

// vanilla/CMS post barrier
// Insert a write-barrier store.  This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
void GraphKit::write_barrier_post(Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint  adr_idx,
                                  Node* val,
                                  bool use_precise) {
  // No store check needed if we're storing a NULL or an old object
  // (latter case is probably a string constant). The concurrent
  // mark sweep garbage collector, however, needs to have all nonNull
  // oop updates flagged via card-marks.
  if (val != NULL && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      // stores of null never (?) need barriers
      return;
  }

  if (use_ReduceInitialCardMarks()
      && obj == just_allocated_object(control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(this, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size
  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
         "Only one we handle so far.");
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
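  // (Schematically: card_adr = byte_map_base + (adr >> card_shift); with the
  // usual 512-byte cards card_shift is 9, so one table byte covers one card.)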

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node*   zero = __ ConI(0); // Dirty card value
  BasicType bt = T_BYTE;

  if (UseCondCardMark) {
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table.  Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores.  In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);
  }

  // Smash zero into card
  if( !UseConcMarkSweepGC ) {
#if defined(AARCH64)
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
#else
    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
#endif
  } else {
    // Specialized path for CM store barrier
    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  }

  if (UseCondCardMark) {
    __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}

// G1 pre/post barriers
void GraphKit::g1_write_barrier_pre(bool do_load,
                                    Node* obj,
                                    Node* adr,
                                    uint alias_idx,
                                    Node* val,
                                    const TypeOopPtr* val_type,
                                    Node* pre_val,
                                    BasicType bt) {

  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");
  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");
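
  // Schematically, the SATB pre-barrier built below is:
  //   if (marking_active) {
  //     pre_val = *adr;                                 // only if do_load
  //     if (pre_val != NULL) {
  //       if (index != 0)  enqueue pre_val locally;     // store into buffer
  //       else             g1_wb_pre(pre_val, thread);  // runtime call
  //     }
  //   }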

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_ctrl = NULL;
  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
                                          PtrQueue::byte_offset_of_active());
  const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
                                          PtrQueue::byte_offset_of_index());
  const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
                                          PtrQueue::byte_offset_of_buf());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0)
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, null()); {
      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}

//
// Update the card table and add card address to the queue
//
void GraphKit::g1_mark_card(IdealKit& ideal,
                            Node* card_adr,
                            Node* oop_store,
                            uint oop_alias_idx,
                            Node* index,
                            Node* index_adr,
                            Node* buffer,
                            const TypeFunc* tf) {

  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  //  Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
  } __ end_if();

}

void GraphKit::g1_write_barrier_post(Node* oop_store,
                                     Node* obj,
                                     Node* adr,
                                     uint alias_idx,
                                     Node* val,
                                     BasicType bt,
                                     bool use_precise) {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");
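
  // Schematically, the filtering chain built below is:
  //   if ((adr ^ val) >> LogOfHRGrainBytes != 0)     // cross-region store?
  //     if (val != NULL)                             // storing non-NULL?
  //       if (card != young_card) {
  //         StoreLoad barrier;                       // order vs. the oop store
  //         if (card != dirty_card)
  //           g1_mark_card(...);                     // dirty the card, enqueue
  //       }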

  IdealKit ideal(this, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely  = PROB_LIKELY(0.999);
  float unlikely  = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory
  const TypePtr* card_type = TypeRawPtr::BOTTOM;

  const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_index());
  const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
                                     PtrQueue::byte_offset_of_buf());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast =  __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res =  __ URShiftX ( __ XorX( cast,  __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          sync_kit(ideal);
          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
          insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(this);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // Object.clone() intrinsic uses this path.
    g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
  }

  // Final sync IdealKit and GraphKit.
  final_sync(ideal);
}
#undef __



Node* GraphKit::load_String_offset(Node* ctrl, Node* str) {
  if (java_lang_String::has_offset_field()) {
    int offset_offset = java_lang_String::offset_offset_in_bytes();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                       false, NULL, 0);
    const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
    int offset_field_idx = C->get_alias_index(offset_field_type);
    return make_load(ctrl,
                     basic_plus_adr(str, str, offset_offset),
                     TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
  } else {
    return intcon(0);
  }
}

Node* GraphKit::load_String_length(Node* ctrl, Node* str) {
  if (java_lang_String::has_count_field()) {
    int count_offset = java_lang_String::count_offset_in_bytes();
    const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                       false, NULL, 0);
    const TypePtr* count_field_type = string_type->add_offset(count_offset);
    int count_field_idx = C->get_alias_index(count_field_type);
    return make_load(ctrl,
                     basic_plus_adr(str, str, count_offset),
                     TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
  } else {
    return load_array_length(load_String_value(ctrl, str));
  }
}

Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
  int value_field_idx = C->get_alias_index(value_field_type);
  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
  // String.value field is known to be @Stable.
  if (UseImplicitStableValues) {
    load = cast_array_to_stable(load, value_type);
  }
  return load;
}

void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
  int offset_offset = java_lang_String::offset_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
  int offset_field_idx = C->get_alias_index(offset_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
                  value, T_INT, offset_field_idx, MemNode::unordered);
}

void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* value_field_type = string_type->add_offset(value_offset);

  store_oop_to_object(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
      value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
}

void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
  int count_offset = java_lang_String::count_offset_in_bytes();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, NULL, 0);
  const TypePtr* count_field_type = string_type->add_offset(count_offset);
  int count_field_idx = C->get_alias_index(count_field_type);
  store_to_memory(ctrl, basic_plus_adr(str, count_offset),
                  value, T_INT, count_field_idx, MemNode::unordered);
}

Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
  // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
  // assumption of CCP analysis.
  return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
}
C:\hotspot-69087d08d473\src\share\vm/opto/graphKit.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode*     map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*        env()           const { return _env; }
  PhaseGVN&     gvn()           const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()          const { return zerocon(T_OBJECT); }
  Node*         top()           const { return C->top(); }
  RootNode*     root()          const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed via the StartNode enum constants.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }

  void set_jvms(JVMState* jvms)       { set_map(jvms->map());
                                        assert(jvms == this->jvms(), "sanity");
                                        _sp = jvms->sp();
                                        _bci = jvms->bci();
                                        _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)                 { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)                  { set_sp(sp() + i); }
  void dec_sp(int i)                  { set_sp(sp() - i); }
  void set_bci(int bci)               { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Recover a saved exception from its map, and remove it from the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clears the current exception list and map, and returns the combined state.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
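
  // Example (illustrative; mirrors store_String_length in graphKit.cpp):
  // the address of a field at byte offset 'count_offset' inside 'str' is
  //
  //   Node* adr = basic_plus_adr(str, count_offset);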


  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C) IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C) DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C) XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C) MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C) URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C) AddPNode(b, a, o));    }
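
  // Example (illustrative sketch; 'i' and 'limit' are assumed to be
  // existing int Nodes): building the test "i < limit".  Each helper
  // returns the GVN-transformed node:
  //
  //   Node* cmp = CmpI(i, limit);
  //   Node* tst = Bool(cmp, BoolTest::lt);
  //
  // 'tst' can then feed an IfNode (see create_and_map_if below).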

  // Convert between int and long, and size_t.
  // (See the ConvI2X family of macros in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);


  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false, bool safe_for_replace = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
  Node* record_profiled_receiver_for_speculation(Node* n);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++        , n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp             ); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1   ); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }
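
  // Example (illustrative): a two-slot value is popped as a pair and a
  // one-slot result pushed back, e.g. narrowing a long to an int:
  //
  //   Node* lval = pop_pair();   // T_LONG occupies two stack slots
  //   push(ConvL2I(lval));       // T_INT occupies one slot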

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
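
  // Example (illustrative sketch; 'st' is assumed to be a transformed
  // store node and 'adr_type' the slice it writes): only that slice of
  // the MergeMem is redirected:
  //
  //   set_memory(st, adr_type);
  //   Node* mem = memory(adr_type);  // reads the updated slice back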

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false);
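
  // Example (illustrative; 'adr' is assumed to be an address Node): an
  // unordered int load whose alias index is derived from the address's
  // bottom type:
  //
  //   Node* x = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);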

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched);
  }
  // This is the base version which is given alias index
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false);
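
  // Example (illustrative; cf. store_String_length in graphKit.cpp): an
  // unordered int store through a precomputed alias index:
  //
  //   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
  //                   value, T_INT, count_field_idx, MemNode::unordered);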


  // All in one pre-barrier, store, post_barrier
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store: to an object, an array, or an unknown target.
  // We use precise card marks for arrays to avoid scanning the entire
  // array. We use imprecise marks for objects. We use precise marks for
  // unknown targets, since we don't know if we have an array or an
  // object, or even where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value. QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise,
                  MemNode::MemOrd mo,
                  bool mismatched = false);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt,
                            MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           MemNode::MemOrd mo) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt,
                             MemNode::MemOrd mo,
                             bool mismatched = false);
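
  // Example (illustrative; cf. store_String_value in graphKit.cpp): a
  // field store uses the object flavor, i.e. an imprecise card mark:
  //
  //   store_oop_to_object(ctrl, str, basic_plus_adr(str, value_offset),
  //                       value_field_type, value, TypeAryPtr::CHARS,
  //                       T_OBJECT, MemNode::unordered);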

  // For the few case where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
    // Use callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
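    // Temporarily raise sp so the receiver and arguments still sitting on
    // the stack are reported as live in the debug info of any uncommon
    // trap taken by the null check; the call bytecode can then re-execute
    // with its operands intact.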
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void  set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj,
                          Node* adr,  uint adr_idx, Node* val, bool use_precise);

  // Allow reordering of pre-barrier with oop store and/or post-barrier.
  // Used for load_store operations which loads old value.
  bool can_move_pre_barrier() const;

  // G1 pre/post barriers
  void g1_write_barrier_pre(bool do_load,
                            Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            Node* pre_val,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
  private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

  public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc* slow_call_type,
                      Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception=false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_offset(Node* ctrl, Node* str);
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  void store_String_offset(Node* ctrl, Node* str, Node* value);
  void store_String_length(Node* ctrl, Node* str, Node* value);
  void store_String_value(Node* ctrl, Node* str, Node* value);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // new IfNode
    _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // new IfNode
    _gvn.transform(iff);                           // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }
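
  // Example (illustrative sketch; 'tst', 'a', and 'b' are assumed Nodes,
  // and PROB_FAIR is a placeholder probability): a full diamond merging
  // two int values under the test 'tst':
  //
  //   IfNode* iff = create_and_xform_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* tpath = IfTrue(iff);
  //   Node* fpath = IfFalse(iff);
  //   RegionNode* r   = new (C) RegionNode(3);
  //   Node*       phi = new (C) PhiNode(r, TypeInt::INT);
  //   r->init_req(1, tpath);    phi->init_req(1, a);
  //   r->init_req(2, fpath);    phi->init_req(2, b);
  //   record_for_igvn(r);
  //   set_control(_gvn.transform(r));
  //   Node* merged = _gvn.transform(phi);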

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  // Produce new array node of stable type
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
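// Typical use (illustrative sketch; the probability and trap reason are
// placeholders):
//
// { BuildCutout unless(this, tst, PROB_MAX);
//   uncommon_trap(Deoptimization::Reason_null_check,
//                 Deoptimization::Action_make_not_entrant);
// }
// // here control is the "then" side of if(tst)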
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp, restoring
// them on destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/idealGraphPrinter.cpp
/*
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/chaitin.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/machnode.hpp"
#include "opto/parse.hpp"
#include "runtime/threadCritical.hpp"

#ifndef PRODUCT

// Constants
// Keep consistent with Java constants
const char *IdealGraphPrinter::INDENT = "  ";
const char *IdealGraphPrinter::TOP_ELEMENT = "graphDocument";
const char *IdealGraphPrinter::GROUP_ELEMENT = "group";
const char *IdealGraphPrinter::GRAPH_ELEMENT = "graph";
const char *IdealGraphPrinter::PROPERTIES_ELEMENT = "properties";
const char *IdealGraphPrinter::EDGES_ELEMENT = "edges";
const char *IdealGraphPrinter::PROPERTY_ELEMENT = "p";
const char *IdealGraphPrinter::EDGE_ELEMENT = "edge";
const char *IdealGraphPrinter::NODE_ELEMENT = "node";
const char *IdealGraphPrinter::NODES_ELEMENT = "nodes";
const char *IdealGraphPrinter::REMOVE_EDGE_ELEMENT = "removeEdge";
const char *IdealGraphPrinter::REMOVE_NODE_ELEMENT = "removeNode";
const char *IdealGraphPrinter::METHOD_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::METHOD_IS_PUBLIC_PROPERTY = "public";
const char *IdealGraphPrinter::METHOD_IS_STATIC_PROPERTY = "static";
const char *IdealGraphPrinter::TRUE_VALUE = "true";
const char *IdealGraphPrinter::NODE_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::EDGE_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::NODE_ID_PROPERTY = "id";
const char *IdealGraphPrinter::FROM_PROPERTY = "from";
const char *IdealGraphPrinter::TO_PROPERTY = "to";
const char *IdealGraphPrinter::PROPERTY_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::GRAPH_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::INDEX_PROPERTY = "index";
const char *IdealGraphPrinter::METHOD_ELEMENT = "method";
const char *IdealGraphPrinter::INLINE_ELEMENT = "inline";
const char *IdealGraphPrinter::BYTECODES_ELEMENT = "bytecodes";
const char *IdealGraphPrinter::METHOD_BCI_PROPERTY = "bci";
const char *IdealGraphPrinter::METHOD_SHORT_NAME_PROPERTY = "shortName";
const char *IdealGraphPrinter::CONTROL_FLOW_ELEMENT = "controlFlow";
const char *IdealGraphPrinter::BLOCK_NAME_PROPERTY = "name";
const char *IdealGraphPrinter::BLOCK_DOMINATOR_PROPERTY = "dom";
const char *IdealGraphPrinter::BLOCK_ELEMENT = "block";
const char *IdealGraphPrinter::SUCCESSORS_ELEMENT = "successors";
const char *IdealGraphPrinter::SUCCESSOR_ELEMENT = "successor";
const char *IdealGraphPrinter::ASSEMBLY_ELEMENT = "assembly";

int IdealGraphPrinter::_file_count = 0;

IdealGraphPrinter *IdealGraphPrinter::printer() {
  if (PrintIdealGraphLevel == 0) return NULL;

  JavaThread *thread = JavaThread::current();
  if (!thread->is_Compiler_thread()) return NULL;

  CompilerThread *compiler_thread = (CompilerThread *)thread;
  if (compiler_thread->ideal_graph_printer() == NULL) {
    IdealGraphPrinter *printer = new IdealGraphPrinter();
    compiler_thread->set_ideal_graph_printer(printer);
  }

  return compiler_thread->ideal_graph_printer();
}

void IdealGraphPrinter::clean_up() {
  JavaThread *p;
  for (p = Threads::first(); p; p = p->next()) {
    if (p->is_Compiler_thread()) {
      CompilerThread *c = (CompilerThread *)p;
      IdealGraphPrinter *printer = c->ideal_graph_printer();
      if (printer) {
        delete printer;
      }
      c->set_ideal_graph_printer(NULL);
    }
  }
}

// Constructor, either file or network output
IdealGraphPrinter::IdealGraphPrinter() {

  // By default dump both ins and outs since dead or unreachable code
  // needs to appear in the graph.  There are also some special cases
  // in the mach node graph where kill projections have no users but
  // should appear in the dump.
  _traverse_outs = true;
  _should_send_method = true;
  _output = NULL;
  buffer[0] = 0;
  _depth = 0;
  _current_method = NULL;
  assert(!_current_method, "current method must be initialized to NULL");
  _stream = NULL;

  if (PrintIdealGraphFile != NULL) {
    ThreadCritical tc;
    // User wants all output to go to files
    if (_file_count != 0) {
      ResourceMark rm;
      stringStream st;
      const char* dot = strrchr(PrintIdealGraphFile, '.');
      if (dot) {
        st.write(PrintIdealGraphFile, dot - PrintIdealGraphFile);
        st.print("%d%s", _file_count, dot);
      } else {
        st.print("%s%d", PrintIdealGraphFile, _file_count);
      }
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(st.as_string());
      _output = stream;
    } else {
      fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(PrintIdealGraphFile);
      _output = stream;
    }
    _file_count++;
  } else {
    _stream = new (ResourceObj::C_HEAP, mtCompiler) networkStream();

    // Try to connect to visualizer
    if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) {
      char c = 0;
      _stream->read(&c, 1);
      if (c != 'y') {
        tty->print_cr("Client available, but does not want to receive data!");
        _stream->close();
        delete _stream;
        _stream = NULL;
        return;
      }
      _output = _stream;
    } else {
      // It would be nice if we could shut down cleanly but it should
      // be an error if we can't connect to the visualizer.
      fatal(err_msg_res("Couldn't connect to visualizer at %s:" INTX_FORMAT,
                        PrintIdealGraphAddress, PrintIdealGraphPort));
    }
  }

  _xml = new (ResourceObj::C_HEAP, mtCompiler) xmlStream(_output);

  head(TOP_ELEMENT);
}
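
// For example (illustrative), with -XX:PrintIdealGraphFile=graph.xml the
// first printer writes graph.xml and later printers write graph1.xml,
// graph2.xml, and so on: the shared _file_count is spliced in just before
// the file extension.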

// Destructor, close file or network stream
IdealGraphPrinter::~IdealGraphPrinter() {

  tail(TOP_ELEMENT);

  // tty->print_cr("Walk time: %d", (int)_walk_time.milliseconds());
  // tty->print_cr("Output time: %d", (int)_output_time.milliseconds());
  // tty->print_cr("Build blocks time: %d", (int)_build_blocks_time.milliseconds());

  if(_xml) {
    delete _xml;
    _xml = NULL;
  }

  if (_stream) {
    delete _stream;
    if (_stream == _output) {
      _output = NULL;
    }
    _stream = NULL;
  }

  if (_output) {
    delete _output;
    _output = NULL;
  }
}


void IdealGraphPrinter::begin_elem(const char *s) {
  _xml->begin_elem("%s", s);
}

void IdealGraphPrinter::end_elem() {
  _xml->end_elem();
}

void IdealGraphPrinter::begin_head(const char *s) {
  _xml->begin_head("%s", s);
}

void IdealGraphPrinter::end_head() {
  _xml->end_head();
}

void IdealGraphPrinter::print_attr(const char *name, intptr_t val) {
  stringStream stream;
  stream.print(INTX_FORMAT, val);
  print_attr(name, stream.as_string());
}

void IdealGraphPrinter::print_attr(const char *name, const char *val) {
  _xml->print(" %s='", name);
  text(val);
  _xml->print("'");
}

void IdealGraphPrinter::head(const char *name) {
  _xml->head("%s", name);
}

void IdealGraphPrinter::tail(const char *name) {
  _xml->tail(name);
}

void IdealGraphPrinter::text(const char *s) {
  _xml->text("%s", s);
}

void IdealGraphPrinter::print_prop(const char *name, int val) {

  stringStream stream;
  stream.print("%d", val);
  print_prop(name, stream.as_string());
}

void IdealGraphPrinter::print_prop(const char *name, const char *val) {
  begin_head(PROPERTY_ELEMENT);
  print_attr(PROPERTY_NAME_PROPERTY, name);
  end_head();
  text(val);
  tail(PROPERTY_ELEMENT);
}
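
// For example (illustrative), print_prop("idx", 42) emits
//
//   <p name='idx'>42</p>
//
// using the PROPERTY_ELEMENT and PROPERTY_NAME_PROPERTY constants above.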

void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree) {
  begin_head(METHOD_ELEMENT);

  stringStream str;
  method->print_name(&str);

  stringStream shortStr;
  method->print_short_name(&shortStr);

  print_attr(METHOD_NAME_PROPERTY, str.as_string());
  print_attr(METHOD_SHORT_NAME_PROPERTY, shortStr.as_string());
  print_attr(METHOD_BCI_PROPERTY, bci);

  end_head();

  head(BYTECODES_ELEMENT);
  output()->print_cr("<![CDATA[");
  method->print_codes_on(output());
  output()->print_cr("]]>");
  tail(BYTECODES_ELEMENT);

  head(INLINE_ELEMENT);
  if (tree != NULL) {
    GrowableArray<InlineTree *> subtrees = tree->subtrees();
    for (int i = 0; i < subtrees.length(); i++) {
      print_inline_tree(subtrees.at(i));
    }
  }
  tail(INLINE_ELEMENT);

  tail(METHOD_ELEMENT);
  output()->flush();
}

void IdealGraphPrinter::print_inline_tree(InlineTree *tree) {

  if (tree == NULL) return;

  ciMethod *method = tree->method();
  print_method(tree->method(), tree->caller_bci(), tree);

}

void IdealGraphPrinter::print_inlining(Compile* compile) {

  // Print inline tree
  if (_should_send_method) {
    InlineTree *inlineTree = compile->ilt();
    if (inlineTree != NULL) {
      print_inline_tree(inlineTree);
    } else {
      // print this method only
    }
  }
}

// Has to be called whenever a method is compiled
void IdealGraphPrinter::begin_method(Compile* compile) {

  ciMethod *method = compile->method();
  assert(_output, "output stream must exist!");
  assert(method, "null methods are not allowed!");
  assert(!_current_method, "current method must be null!");

  head(GROUP_ELEMENT);

  head(PROPERTIES_ELEMENT);

  // Print properties
  // Add method name
  stringStream strStream;
  method->print_name(&strStream);
  print_prop(METHOD_NAME_PROPERTY, strStream.as_string());

  if (method->flags().is_public()) {
    print_prop(METHOD_IS_PUBLIC_PROPERTY, TRUE_VALUE);
  }

  if (method->flags().is_static()) {
    print_prop(METHOD_IS_STATIC_PROPERTY, TRUE_VALUE);
  }

  tail(PROPERTIES_ELEMENT);

  if (_stream) {
    char answer = 0;
    _xml->flush();
    int result = _stream->read(&answer, 1);
    _should_send_method = (answer == 'y');
  }

  this->_current_method = method;

  _xml->flush();
}

// Has to be called whenever a method has finished compilation
void IdealGraphPrinter::end_method() {

  tail(GROUP_ELEMENT);
  _current_method = NULL;
  _xml->flush();
}

// Print indent
void IdealGraphPrinter::print_indent() {
  tty->print_cr("printing ident %d", _depth);
  for (int i = 0; i < _depth; i++) {
    _xml->print("%s", INDENT);
  }
}

bool IdealGraphPrinter::traverse_outs() {
  return _traverse_outs;
}

void IdealGraphPrinter::set_traverse_outs(bool b) {
  _traverse_outs = b;
}

intptr_t IdealGraphPrinter::get_node_id(Node *n) {
  return (intptr_t)(n);
}

void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {

  if (edges) {

    // Output edge
    intptr_t dest_id = get_node_id(n);
    for ( uint i = 0; i < n->len(); i++ ) {
      if ( n->in(i) ) {
        Node *source = n->in(i);
        begin_elem(EDGE_ELEMENT);
        intptr_t source_id = get_node_id(source);
        print_attr(FROM_PROPERTY, source_id);
        print_attr(TO_PROPERTY, dest_id);
        print_attr(INDEX_PROPERTY, i);
        end_elem();
      }
    }

  } else {

    // Output node
    begin_head(NODE_ELEMENT);
    print_attr(NODE_ID_PROPERTY, get_node_id(n));
    end_head();

    head(PROPERTIES_ELEMENT);

    Node *node = n;
#ifndef PRODUCT
    Compile::current()->_in_dump_cnt++;
    print_prop(NODE_NAME_PROPERTY, (const char *)node->Name());
    const Type *t = node->bottom_type();
    print_prop("type", t->msg());
    print_prop("idx", node->_idx);
#ifdef ASSERT
    print_prop("debug_idx", node->_debug_idx);
#endif

    if (C->cfg() != NULL) {
      Block* block = C->cfg()->get_block_for_node(node);
      if (block == NULL) {
        print_prop("block", C->cfg()->get_block(0)->_pre_order);
      } else {
        print_prop("block", block->_pre_order);
      }
    }

    const jushort flags = node->flags();
    if (flags & Node::Flag_is_Copy) {
      print_prop("is_copy", "true");
    }
    if (flags & Node::Flag_rematerialize) {
      print_prop("rematerialize", "true");
    }
    if (flags & Node::Flag_needs_anti_dependence_check) {
      print_prop("needs_anti_dependence_check", "true");
    }
    if (flags & Node::Flag_is_macro) {
      print_prop("is_macro", "true");
    }
    if (flags & Node::Flag_is_Con) {
      print_prop("is_con", "true");
    }
    if (flags & Node::Flag_is_cisc_alternate) {
      print_prop("is_cisc_alternate", "true");
    }
    if (flags & Node::Flag_is_dead_loop_safe) {
      print_prop("is_dead_loop_safe", "true");
    }
    if (flags & Node::Flag_may_be_short_branch) {
      print_prop("may_be_short_branch", "true");
    }
    if (flags & Node::Flag_has_call) {
      print_prop("has_call", "true");
    }

    if (C->matcher() != NULL) {
      if (C->matcher()->is_shared(node)) {
        print_prop("is_shared", "true");
      } else {
        print_prop("is_shared", "false");
      }
      if (C->matcher()->is_dontcare(node)) {
        print_prop("is_dontcare", "true");
      } else {
        print_prop("is_dontcare", "false");
      }

#ifdef ASSERT
      Node* old = C->matcher()->find_old_node(node);
      if (old != NULL) {
        print_prop("old_node_idx", old->_idx);
      }
#endif
    }

    if (node->is_Proj()) {
      print_prop("con", (int)node->as_Proj()->_con);
    }

    if (node->is_Mach()) {
      print_prop("idealOpcode", (const char *)NodeClassNames[node->as_Mach()->ideal_Opcode()]);
    }

    buffer[0] = 0;
    stringStream s2(buffer, sizeof(buffer) - 1);

    node->dump_spec(&s2);
    if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
      const TypeInstPtr  *toop = t->isa_instptr();
      const TypeKlassPtr *tkls = t->isa_klassptr();
      ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
      if( klass && klass->is_loaded() && klass->is_interface() ) {
        s2.print("  Interface:");
      } else if( toop ) {
        s2.print("  Oop:");
      } else if( tkls ) {
        s2.print("  Klass:");
      }
      t->dump_on(&s2);
    } else if( t == Type::MEMORY ) {
      s2.print("  Memory:");
      MemNode::dump_adr_type(node, node->adr_type(), &s2);
    }

    assert(s2.size() < sizeof(buffer), "size in range");
    print_prop("dump_spec", buffer);

    if (node->is_block_proj()) {
      print_prop("is_block_proj", "true");
    }

    if (node->is_block_start()) {
      print_prop("is_block_start", "true");
    }

    const char *short_name = "short_name";
    if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) {
      int index = node->as_Proj()->_con - TypeFunc::Parms;
      if (index >= 10) {
        print_prop(short_name, "PA");
      } else {
        sprintf(buffer, "P%d", index);
        print_prop(short_name, buffer);
      }
    } else if (strcmp(node->Name(), "IfTrue") == 0) {
      print_prop(short_name, "T");
    } else if (strcmp(node->Name(), "IfFalse") == 0) {
      print_prop(short_name, "F");
    } else if ((node->is_Con() && node->is_Type()) || node->is_Proj()) {

      if (t->base() == Type::Int && t->is_int()->is_con()) {
        const TypeInt *typeInt = t->is_int();
        assert(typeInt->is_con(), "must be constant");
        jint value = typeInt->get_con();

        // max. 2 chars allowed
        if (value >= -9 && value <= 99) {
          sprintf(buffer, "%d", value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "I");
        }
      } else if (t == Type::TOP) {
        print_prop(short_name, "^");
      } else if (t->base() == Type::Long && t->is_long()->is_con()) {
        const TypeLong *typeLong = t->is_long();
        assert(typeLong->is_con(), "must be constant");
        jlong value = typeLong->get_con();

        // max. 2 chars allowed
        if (value >= -9 && value <= 99) {
          sprintf(buffer, JLONG_FORMAT, value);
          print_prop(short_name, buffer);
        } else {
          print_prop(short_name, "L");
        }
      } else if (t->base() == Type::KlassPtr) {
        print_prop(short_name, "CP");
      } else if (t->base() == Type::Control) {
        print_prop(short_name, "C");
      } else if (t->base() == Type::Memory) {
        print_prop(short_name, "M");
      } else if (t->base() == Type::Abio) {
        print_prop(short_name, "IO");
      } else if (t->base() == Type::Return_Address) {
        print_prop(short_name, "RA");
      } else if (t->base() == Type::AnyPtr) {
        print_prop(short_name, "P");
      } else if (t->base() == Type::RawPtr) {
        print_prop(short_name, "RP");
      } else if (t->base() == Type::AryPtr) {
        print_prop(short_name, "AP");
      }
    }

    JVMState* caller = NULL;
    if (node->is_SafePoint()) {
      caller = node->as_SafePoint()->jvms();
    } else {
      Node_Notes* notes = C->node_notes_at(node->_idx);
      if (notes != NULL) {
        caller = notes->jvms();
      }
    }

    if (caller != NULL) {
      stringStream bciStream;
      ciMethod* last = NULL;
      int last_bci = -1; // defensively initialized; only read when 'last' is set
      while(caller) {
        if (caller->has_method()) {
          last = caller->method();
          last_bci = caller->bci();
        }
        bciStream.print("%d ", caller->bci());
        caller = caller->caller();
      }
      print_prop("bci", bciStream.as_string());
      if (last != NULL && last->has_linenumber_table() && last_bci >= 0) {
        print_prop("line", last->line_number_from_bci(last_bci));
      }
    }

#ifdef ASSERT
    if (node->debug_orig() != NULL) {
      temp_set->Clear();
      stringStream dorigStream;
      // Walk the debug_orig chain; temp_set guards against cycles and
      // ensures each original node is printed only once.  (As previously
      // written the loop neither advanced dorig nor ran its body.)
      Node* dorig = node->debug_orig();
      while (dorig != NULL && !temp_set->test_set(dorig->_idx)) {
        dorigStream.print("%d ", dorig->_idx);
        dorig = dorig->debug_orig();
      }
      print_prop("debug_orig", dorigStream.as_string());
    }
#endif

    if (_chaitin && _chaitin != (PhaseChaitin *)((intptr_t)0xdeadbeef)) {
      buffer[0] = 0;
      _chaitin->dump_register(node, buffer);
      print_prop("reg", buffer);
      uint lrg_id = 0;
      if (node->_idx < _chaitin->_lrg_map.size()) {
        lrg_id = _chaitin->_lrg_map.live_range_id(node);
      }
      print_prop("lrg", lrg_id);
    }

    Compile::current()->_in_dump_cnt--;
#endif

    tail(PROPERTIES_ELEMENT);
    tail(NODE_ELEMENT);
  }
}

void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set) {

  VectorSet visited(Thread::current()->resource_area());
  GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL);
  nodeStack.push(start);
  visited.test_set(start->_idx);
  if (C->cfg() != NULL) {
    // once we have a CFG there are some nodes that aren't really
    // reachable but are in the CFG so add them here.
    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
      Block* block = C->cfg()->get_block(i);
      for (uint s = 0; s < block->number_of_nodes(); s++) {
        nodeStack.push(block->get_node(s));
      }
    }
  }

  while(nodeStack.length() > 0) {

    Node *n = nodeStack.pop();
    visit_node(n, edges, temp_set);

    if (_traverse_outs) {
      for (DUIterator i = n->outs(); n->has_out(i); i++) {
        Node* p = n->out(i);
        if (!visited.test_set(p->_idx)) {
          nodeStack.push(p);
        }
      }
    }

    for ( uint i = 0; i < n->len(); i++ ) {
      if ( n->in(i) ) {
        if (!visited.test_set(n->in(i)->_idx)) {
          nodeStack.push(n->in(i));
        }
      }
    }
  }
}

void IdealGraphPrinter::print_method(Compile* compile, const char *name, int level, bool clear_nodes) {
  print(compile, name, (Node *)compile->root(), level, clear_nodes);
}

// Print current ideal graph
void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, int level, bool clear_nodes) {

  if (!_current_method || !_should_send_method || level > PrintIdealGraphLevel) return;

  this->C = compile;

  // Warning, unsafe cast?
  _chaitin = (PhaseChaitin *)C->regalloc();

  begin_head(GRAPH_ELEMENT);
  print_attr(GRAPH_NAME_PROPERTY, (const char *)name);
  end_head();

  VectorSet temp_set(Thread::current()->resource_area());

  head(NODES_ELEMENT);
  walk_nodes(node, false, &temp_set);
  tail(NODES_ELEMENT);

  head(EDGES_ELEMENT);
  walk_nodes(node, true, &temp_set);
  tail(EDGES_ELEMENT);
  if (C->cfg() != NULL) {
    head(CONTROL_FLOW_ELEMENT);
    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
      Block* block = C->cfg()->get_block(i);
      begin_head(BLOCK_ELEMENT);
      print_attr(BLOCK_NAME_PROPERTY, block->_pre_order);
      end_head();

      head(SUCCESSORS_ELEMENT);
      for (uint s = 0; s < block->_num_succs; s++) {
        begin_elem(SUCCESSOR_ELEMENT);
        print_attr(BLOCK_NAME_PROPERTY, block->_succs[s]->_pre_order);
        end_elem();
      }
      tail(SUCCESSORS_ELEMENT);

      head(NODES_ELEMENT);
      for (uint s = 0; s < block->number_of_nodes(); s++) {
        begin_elem(NODE_ELEMENT);
        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
        end_elem();
      }
      tail(NODES_ELEMENT);

      tail(BLOCK_ELEMENT);
    }
    tail(CONTROL_FLOW_ELEMENT);
  }
  tail(GRAPH_ELEMENT);
  output()->flush();
}
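
// A sketch (for orientation only; the literal tag names come from the
// string constants declared in idealGraphPrinter.hpp and are assumed
// here, not quoted from them) of the document shape print() emits:
//
//   <graph name="...">
//     <nodes>   ... one element per node visited by walk_nodes ... </nodes>
//     <edges>   ... one element per input edge ...                 </edges>
//     <controlFlow>                       <!-- only when a CFG exists -->
//       <block name="pre_order">
//         <successors> <successor name="..."/> ... </successors>
//         <nodes> <node id="..."/> ... </nodes>
//       </block>
//     </controlFlow>
//   </graph>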

extern const char *NodeClassNames[];

outputStream *IdealGraphPrinter::output() {
  return _xml;
}

#endif
C:\hotspot-69087d08d473\src\share\vm/opto/idealGraphPrinter.hpp
/*
 * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP
#define SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP

#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
#include "utilities/xmlstream.hpp"

#ifndef PRODUCT

class Compile;
class PhaseIFG;
class PhaseChaitin;
class Matcher;
class Node;
class InlineTree;
class ciMethod;

class IdealGraphPrinter : public CHeapObj<mtCompiler> {
 private:

  static const char *INDENT;
  static const char *TOP_ELEMENT;
  static const char *GROUP_ELEMENT;
  static const char *GRAPH_ELEMENT;
  static const char *PROPERTIES_ELEMENT;
  static const char *EDGES_ELEMENT;
  static const char *PROPERTY_ELEMENT;
  static const char *EDGE_ELEMENT;
  static const char *NODE_ELEMENT;
  static const char *NODES_ELEMENT;
  static const char *CONTROL_FLOW_ELEMENT;
  static const char *REMOVE_EDGE_ELEMENT;
  static const char *REMOVE_NODE_ELEMENT;
  static const char *METHOD_NAME_PROPERTY;
  static const char *BLOCK_NAME_PROPERTY;
  static const char *BLOCK_DOMINATOR_PROPERTY;
  static const char *BLOCK_ELEMENT;
  static const char *SUCCESSORS_ELEMENT;
  static const char *SUCCESSOR_ELEMENT;
  static const char *METHOD_IS_PUBLIC_PROPERTY;
  static const char *METHOD_IS_STATIC_PROPERTY;
  static const char *TRUE_VALUE;
  static const char *NODE_NAME_PROPERTY;
  static const char *EDGE_NAME_PROPERTY;
  static const char *NODE_ID_PROPERTY;
  static const char *FROM_PROPERTY;
  static const char *TO_PROPERTY;
  static const char *PROPERTY_NAME_PROPERTY;
  static const char *GRAPH_NAME_PROPERTY;
  static const char *INDEX_PROPERTY;
  static const char *METHOD_ELEMENT;
  static const char *INLINE_ELEMENT;
  static const char *BYTECODES_ELEMENT;
  static const char *METHOD_BCI_PROPERTY;
  static const char *METHOD_SHORT_NAME_PROPERTY;
  static const char *ASSEMBLY_ELEMENT;

  elapsedTimer _walk_time;
  elapsedTimer _output_time;
  elapsedTimer _build_blocks_time;

  static int _file_count;
  networkStream *_stream;
  xmlStream *_xml;
  outputStream *_output;
  ciMethod *_current_method;
  int _depth;
  char buffer[128];
  bool _should_send_method;
  PhaseChaitin* _chaitin;
  bool _traverse_outs;
  Compile *C;

  static void pre_node(Node* node, void *env);
  static void post_node(Node* node, void *env);

  void print_indent();
  void print_method(ciMethod *method, int bci, InlineTree *tree);
  void print_inline_tree(InlineTree *tree);
  void visit_node(Node *n, bool edges, VectorSet* temp_set);
  void walk_nodes(Node *start, bool edges, VectorSet* temp_set);
  void begin_elem(const char *s);
  void end_elem();
  void begin_head(const char *s);
  void end_head();
  void print_attr(const char *name, const char *val);
  void print_attr(const char *name, intptr_t val);
  void print_prop(const char *name, const char *val);
  void print_prop(const char *name, int val);
  void tail(const char *name);
  void head(const char *name);
  void text(const char *s);
  intptr_t get_node_id(Node *n);
  IdealGraphPrinter();
  ~IdealGraphPrinter();

 public:

  static void clean_up();
  static IdealGraphPrinter *printer();

  bool traverse_outs();
  void set_traverse_outs(bool b);
  outputStream *output();
  void print_inlining(Compile* compile);
  void begin_method(Compile* compile);
  void end_method();
  void print_method(Compile* compile, const char *name, int level=1, bool clear_nodes = false);
  void print(Compile* compile, const char *name, Node *root, int level=1, bool clear_nodes = false);
  void print_xml(const char *name);
};

#endif

#endif // SHARE_VM_OPTO_IDEALGRAPHPRINTER_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/idealKit.cpp
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/runtime.hpp"

// Static initialization

// This declares the position where vars are kept in the cvstate
// For some degree of consistency we use the TypeFunc enum to
// soak up spots in the inputs even though we only use early Control
// and Memory slots. (So far.)
const uint IdealKit::first_var = TypeFunc::Parms + 1;
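
// A sketch of the resulting cvstate slot layout (assuming the usual
// TypeFunc numbering, where Control == 0, I_O == 1, Memory == 2 and
// Parms == 5, so first_var == 6):
//
//   in(TypeFunc::Control) -> current control (a RegionNode at merge points)
//   in(TypeFunc::I_O)     -> current i/o state
//   in(TypeFunc::Memory)  -> current memory (a MergeMemNode)
//   in(first_var + id)    -> value of the IdealVariable with that id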

//----------------------------IdealKit-----------------------------------------
IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarations) :
  _gvn(gkit->gvn()), C(gkit->C) {
  _initial_ctrl = gkit->control();
  _initial_memory = gkit->merged_memory();
  _initial_i_o = gkit->i_o();
  _delay_all_transforms = delay_all_transforms;
  _var_ct = 0;
  _cvstate = NULL;
  // Either we carry no memory state at all, or we need the entire
  // (pre-split) memory state.
  assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
  assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
  int init_size = 5;
  _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
  DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
  if (!has_declarations) {
     declarations_done();
  }
}

//----------------------------sync_kit-----------------------------------------
void IdealKit::sync_kit(GraphKit* gkit) {
  set_all_memory(gkit->merged_memory());
  set_i_o(gkit->i_o());
  set_ctrl(gkit->control());
}

//-------------------------------if_then-------------------------------------
// Create:  if(left relop right)
//          /  \
//   iffalse    iftrue
// Push the iffalse cvstate onto the stack. The iftrue becomes the current cvstate.
void IdealKit::if_then(Node* left, BoolTest::mask relop,
                       Node* right, float prob, float cnt, bool push_new_state) {
  assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new If");
  Node* bol;
  if (left->bottom_type()->isa_ptr() == NULL) {
    if (left->bottom_type()->isa_int() != NULL) {
      bol = Bool(CmpI(left, right), relop);
    } else {
      assert(left->bottom_type()->isa_long() != NULL, "what else?");
      bol = Bool(CmpL(left, right), relop);
    }

  } else {
    bol = Bool(CmpP(left, right), relop);
  }
  // Delay gvn.transform on if-nodes until construction is finished
  // to prevent a constant bool input from discarding a control output.
  IfNode* iff = delay_transform(new (C) IfNode(ctrl(), bol, prob, cnt))->as_If();
  Node* then  = IfTrue(iff);
  Node* elsen = IfFalse(iff);
  Node* else_cvstate = copy_cvstate();
  else_cvstate->set_req(TypeFunc::Control, elsen);
  _pending_cvstates->push(else_cvstate);
  DEBUG_ONLY(if (push_new_state) _state->push(IfThenS));
  set_ctrl(then);
}

//-------------------------------else_-------------------------------------
// Pop the else cvstate off the stack, and push the (current) then cvstate.
// The else cvstate becomes the current cvstate.
void IdealKit::else_() {
  assert(state() == IfThenS, "bad state for new Else");
  Node* else_cvstate = _pending_cvstates->pop();
  DEBUG_ONLY(_state->pop());
  // save current (then) cvstate for later use at endif
  _pending_cvstates->push(_cvstate);
  DEBUG_ONLY(_state->push(ElseS));
  _cvstate = else_cvstate;
}

//-------------------------------end_if-------------------------------------
// Merge the "then" and "else" cvstates.
//
// The if_then() pushed a copy of the current state for later use
// as the initial state for a future "else" clause.  The
// current state then became the initial state for the
// then clause.  If an "else" clause was encountered, it will
// pop the top state and use it for its initial state.
// It will also push the current state (the state at the end of
// the "then" clause) for later use at the end_if.
//
// At the endif, the states are:
// 1) else exists a) current state is end of "else" clause
//                b) top stack state is end of "then" clause
//
// 2) no else:    a) current state is end of "then" clause
//                b) top stack state is from the "if_then" which
//                   would have been the initial state of the else.
//
// Merging the states is accomplished by:
//   1) make a label for the merge
//   2) terminate the current state with a goto to the label
//   3) pop the top state from the stack and make it the
//        current state
//   4) bind the label at the current state.  Binding a label
//        terminates the current state with a goto to the
//        label and makes the label's state the current state.
//
void IdealKit::end_if() {
  assert(state() & (IfThenS|ElseS), "bad state for new Endif");
  Node* lab = make_label(1);

  goto_(lab);
  _cvstate = _pending_cvstates->pop();

  bind(lab);
  DEBUG_ONLY(_state->pop());
}
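
// A minimal usage sketch (illustrative only: "kit" is a hypothetical
// IdealKit and "a", "b" hypothetical int nodes), showing how the
// pending-cvstate stack evolves through if_then/else_/end_if:
//
//   kit.if_then(a, BoolTest::lt, b); // push else cvstate; state: IfThenS
//     ...emit the "then" side...
//   kit.else_();                     // swap in the else cvstate; state: ElseS
//     ...emit the "else" side...
//   kit.end_if();                    // goto_ a fresh label, pop, bind(label)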

//-------------------------------loop-------------------------------------
// Create the loop head portion (*) of:
//  *     iv = init
//  *  top: (region node)
//  *     if (iv relop limit) {
//           loop body
//           i = i + 1
//           goto top
//  *     } else // exits loop
//
// Pushes the loop top cvstate first, then the else (loop exit) cvstate
// onto the stack.
void IdealKit::loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) {
  assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new loop");
  if (UseLoopPredicate) {
    // Sync IdealKit and graphKit.
    gkit->sync_kit(*this);
    // Add loop predicate.
    gkit->add_predicate(nargs);
    // Update IdealKit memory.
    sync_kit(gkit);
  }
  set(iv, init);
  Node* head = make_label(1);
  bind(head);
  _pending_cvstates->push(head); // push for use at end_loop
  _cvstate = copy_cvstate();
  if_then(value(iv), relop, limit, prob, cnt, false /* no new state */);
  DEBUG_ONLY(_state->push(LoopS));
  assert(ctrl()->is_IfTrue(), "true branch stays in loop");
  assert(_pending_cvstates->top()->in(TypeFunc::Control)->is_IfFalse(), "false branch exits loop");
}

//-------------------------------end_loop-------------------------------------
// Creates the goto top label.
// Expects the else (loop exit) cvstate to be on top of the
// stack, and the loop top cvstate to be 2nd.
void IdealKit::end_loop() {
  assert((state() == LoopS), "bad state for new end_loop");
  Node* exit = _pending_cvstates->pop();
  Node* head = _pending_cvstates->pop();
  goto_(head);
  clear(head);
  DEBUG_ONLY(_state->pop());
  _cvstate = exit;
}

//-------------------------------make_label-------------------------------------
// Creates a label.  The number of gotos
// must be specified (which should be 1 less than
// the number of predecessors: e.g. a label reached by two
// gotos plus the fall-through bind has goto_ct == 2).
Node* IdealKit::make_label(int goto_ct) {
  assert(_cvstate != NULL, "must declare variables before labels");
  Node* lab = new_cvstate();
  int sz = 1 + goto_ct + 1 /* fall thru */;
  Node* reg = delay_transform(new (C) RegionNode(sz));
  lab->init_req(TypeFunc::Control, reg);
  return lab;
}

//-------------------------------bind-------------------------------------
// Bind a label at the current cvstate by simulating
// a goto to the label.
void IdealKit::bind(Node* lab) {
  goto_(lab, true /* bind */);
  _cvstate = lab;
}

//-------------------------------goto_-------------------------------------
// Make the current cvstate a predecessor of the label,
// creating phis to merge values.  If bind is true and
// this is not the last control edge, then ensure that
// all live values have phis created. Used to create phis
// at loop-top regions.
void IdealKit::goto_(Node* lab, bool bind) {
  Node* reg = lab->in(TypeFunc::Control);
  // find next empty slot in region
  uint slot = 1;
  while (slot < reg->req() && reg->in(slot) != NULL) slot++;
  assert(slot < reg->req(), "too many gotos");
  // If this is last predecessor, then don't force phi creation
  if (slot == reg->req() - 1) bind = false;
  reg->init_req(slot, ctrl());
  assert(first_var + _var_ct == _cvstate->req(), "bad _cvstate size");
  for (uint i = first_var; i < _cvstate->req(); i++) {

    // l is the value of the var reaching the label. It is either a single
    // value reaching the label, or a phi that merges multiple values
    // reaching the label.  The latter is the case when the label's input
    // in(i) is a phi whose control input is the region node for the label.

    Node* l = lab->in(i);
    // Get the current value of the var
    Node* m = _cvstate->in(i);
    // If the var went unused no need for a phi
    if (m == NULL) {
      continue;
    } else if (l == NULL || m == l) {
      // Only one unique value "m" is known to reach this label, so a phi
      // is not yet necessary unless the label is being bound before all
      // predecessors have been seen, in which case "bind" will be true.
      if (bind) {
        m = promote_to_phi(m, reg);
      }
      // Record the phi/value used for this var in the label's cvstate
      lab->set_req(i, m);
    } else {
      // More than one value for the variable reaches this label, so
      // create a phi if one does not already exist.
      if (!was_promoted_to_phi(l, reg)) {
        l = promote_to_phi(l, reg);
        lab->set_req(i, l);
      }
      // Record in the phi, the var's value from the current state
      l->set_req(slot, m);
    }
  }
  do_memory_merge(_cvstate, lab);
  stop();
}
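
// Worked example (node names illustrative): suppose variable v holds n1
// when the first goto_(lab) runs and n2 when the second runs.  The first
// goto_ sees l == NULL and records lab->in(v) == n1 (one value, no phi).
// The second goto_ sees l == n1 != m == n2, promotes n1 to
// Phi(region, n1, n1, ...) and fills this goto's slot with n2, so after
// binding, lab->in(v) is a phi merging n1 and n2.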

//-----------------------------promote_to_phi-----------------------------------
Node* IdealKit::promote_to_phi(Node* n, Node* reg) {
  assert(!was_promoted_to_phi(n, reg), "n already promoted to phi on this region");
  // Get a conservative type for the phi
  const BasicType bt = n->bottom_type()->basic_type();
  const Type* ct = Type::get_const_basic_type(bt);
  return delay_transform(PhiNode::make(reg, n, ct));
}

//-----------------------------declarations_done-------------------------------
void IdealKit::declarations_done() {
  _cvstate = new_cvstate();   // initialize current cvstate
  set_ctrl(_initial_ctrl);    // initialize control in current cvstate
  set_all_memory(_initial_memory);// initialize memory in current cvstate
  set_i_o(_initial_i_o);      // initialize i_o in current cvstate
  DEBUG_ONLY(_state->push(BlockS));
}

//-----------------------------transform-----------------------------------
Node* IdealKit::transform(Node* n) {
  if (_delay_all_transforms) {
    return delay_transform(n);
  } else {
    n = gvn().transform(n);
    C->record_for_igvn(n);
    return n;
  }
}

//-----------------------------delay_transform-----------------------------------
Node* IdealKit::delay_transform(Node* n) {
  // Delay transform until IterativeGVN
  gvn().set_type(n, n->bottom_type());
  C->record_for_igvn(n);
  return n;
}

//-----------------------------new_cvstate-----------------------------------
Node* IdealKit::new_cvstate() {
  uint sz = _var_ct + first_var;
  return new (C) Node(sz);
}

//-----------------------------copy_cvstate-----------------------------------
Node* IdealKit::copy_cvstate() {
  Node* ns = new_cvstate();
  for (uint i = 0; i < ns->req(); i++) ns->init_req(i, _cvstate->in(i));
  // We must clone memory since it will be updated as we do stores.
  ns->set_req(TypeFunc::Memory, MergeMemNode::make(C, ns->in(TypeFunc::Memory)));
  return ns;
}

//-----------------------------clear-----------------------------------
void IdealKit::clear(Node* m) {
  for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
}

//-----------------------------IdealVariable----------------------------
IdealVariable::IdealVariable(IdealKit &k) {
  k.declare(this);
}

Node* IdealKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

void IdealKit::set_memory(Node* mem, uint alias_idx) {
  merged_memory()->set_memory_at(alias_idx, mem);
}

//----------------------------- make_load ----------------------------
Node* IdealKit::load(Node* ctl,
                     Node* adr,
                     const Type* t,
                     BasicType bt,
                     int adr_idx,
                     bool require_atomic_access) {

  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, MemNode::unordered);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, MemNode::unordered);
  }
  return transform(ld);
}

Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
                      int adr_idx,
                      MemNode::MemOrd mo, bool require_atomic_access,
                      bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st;
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
  }
  if (mismatched) {
    st->as_Store()->set_mismatched_access();
  }
  st = transform(st);
  set_memory(st, adr_idx);

  return st;
}

// Card mark store. Must be ordered so that it will come after the store of
// the oop.
Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store, int oop_adr_idx,
                        BasicType bt,
                        int adr_idx) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);

  // Add required edge to oop_store, optimizer does not support precedence edges.
  // Convert required edge to precedence edge before allocation.
  Node* st = new (C) StoreCMNode(ctl, mem, adr, adr_type, val, oop_store, oop_adr_idx);

  st = transform(st);
  set_memory(st, adr_idx);

  return st;
}

//---------------------------- do_memory_merge --------------------------------
// The memory from one merging cvstate needs to be merged with the memory for another
// join cvstate. If the join cvstate doesn't have a merged memory yet then we
// can just copy the state from the merging cvstate

// Merge one slow path into the rest of memory.
void IdealKit::do_memory_merge(Node* merging, Node* join) {

  // Get the region for the join state
  Node* join_region = join->in(TypeFunc::Control);
  assert(join_region != NULL, "join region must exist");
  if (join->in(TypeFunc::I_O) == NULL ) {
    join->set_req(TypeFunc::I_O,  merging->in(TypeFunc::I_O));
  }
  if (join->in(TypeFunc::Memory) == NULL ) {
    join->set_req(TypeFunc::Memory,  merging->in(TypeFunc::Memory));
    return;
  }

  // The control flow for merging must have already been attached to the
  // join region; we need its index for the phis.
  uint slot;
  for (slot = 1; slot < join_region->req(); slot++) {
    if (join_region->in(slot) == merging->in(TypeFunc::Control)) break;
  }
  assert(slot != join_region->req(), "edge must already exist");

  MergeMemNode* join_m    = join->in(TypeFunc::Memory)->as_MergeMem();
  MergeMemNode* merging_m = merging->in(TypeFunc::Memory)->as_MergeMem();

  // join_m should be an ancestor mergemem of merging
  // Slow path memory comes from the current map (which is from a slow call)
  // Fast path/null path memory comes from the call's input

  // Merge the other fast-memory inputs with the new slow-default memory.
  for (MergeMemStream mms(join_m, merging_m); mms.next_non_empty2(); ) {
    Node* join_slice = mms.force_memory();
    Node* merging_slice = mms.memory2();
    if (join_slice != merging_slice) {
      PhiNode* phi;
      // Is the phi for this slice one that we created for this join region,
      // or simply one we copied? If it is ours, we can simply fill in the
      // slot for this merging predecessor below.
      if (join_slice->is_Phi() && join_slice->as_Phi()->region() == join_region) {
        phi = join_slice->as_Phi();
      } else {
        // Create the phi with join_slice supplying memory for all of the
        // control edges to the join region.
        phi = PhiNode::make(join_region, join_slice, Type::MEMORY, mms.adr_type(C));
        phi = (PhiNode*) delay_transform(phi);
      }
      // Now update the phi with the slice for the merging slice
      phi->set_req(slot, merging_slice/* slow_path, slow_slice */);
      // this updates join_m with the phi
      mms.set_memory(phi);
    }
  }

  Node* join_io    = join->in(TypeFunc::I_O);
  Node* merging_io = merging->in(TypeFunc::I_O);
  if (join_io != merging_io) {
    PhiNode* phi;
    if (join_io->is_Phi() && join_io->as_Phi()->region() == join_region) {
      phi = join_io->as_Phi();
    } else {
      phi = PhiNode::make(join_region, join_io, Type::ABIO);
      phi = (PhiNode*) delay_transform(phi);
      join->set_req(TypeFunc::I_O, phi);
    }
    phi->set_req(slot, merging_io);
  }
}
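
// A small example of the per-slice merge above (slice names illustrative):
// if the join state already holds memory M1 for alias index k and the
// merging path arrives with M2 != M1 for that slice, the loop installs
// Phi(join_region, M1, ..., M1) for slice k and then overwrites the slot
// matching this predecessor's control edge with M2.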


//----------------------------- make_call  ----------------------------
// Trivial runtime call
void IdealKit::make_leaf_call(const TypeFunc *slow_call_type,
                              address slow_call,
                              const char *leaf_name,
                              Node* parm0,
                              Node* parm1,
                              Node* parm2,
                              Node* parm3) {

  // We only handle taking in RawMem and modifying RawMem
  const TypePtr* adr_type = TypeRawPtr::BOTTOM;
  uint adr_idx = C->get_alias_index(adr_type);

  // Slow-path leaf call
  CallNode *call =  (CallNode*)new (C) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);

  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl() );
  call->init_req( TypeFunc::I_O    , top() );   // does no i/o
  // Narrow memory as only memory input
  call->init_req( TypeFunc::Memory , memory(adr_idx));
  call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
  call->init_req( TypeFunc::ReturnAdr, top() );

  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL)  call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL)  call->init_req(TypeFunc::Parms+3, parm3);

  call = (CallNode *) _gvn.transform(call);
  Node *c = call; // extra handle: dbx gets confused by "call->dump()"

  // Slow leaf call has no side-effects, sets few values

  set_ctrl(transform( new (C) ProjNode(call,TypeFunc::Control) ));

  // Make memory for the call
  Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );

  // Set the RawPtr memory state only.
  set_memory(mem, adr_idx);

  assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
         "call node must be constructed correctly");
}


void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type,
                              address slow_call,
                              const char *leaf_name,
                              const TypePtr* adr_type,
                              Node* parm0,
                              Node* parm1,
                              Node* parm2,
                              Node* parm3) {

  // We only handle taking in RawMem and modifying RawMem
  uint adr_idx = C->get_alias_index(adr_type);

  // Slow-path leaf call
  CallNode *call =  (CallNode*)new (C) CallLeafNoFPNode( slow_call_type, slow_call, leaf_name, adr_type);

  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl() );
  call->init_req( TypeFunc::I_O    , top() );   // does no i/o
  // Narrow memory as only memory input
  call->init_req( TypeFunc::Memory , memory(adr_idx));
  call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ );
  call->init_req( TypeFunc::ReturnAdr, top() );

  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != NULL)  call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != NULL)  call->init_req(TypeFunc::Parms+3, parm3);

  call = (CallNode *) _gvn.transform(call);
  Node *c = call; // extra handle: dbx gets confused by "call->dump()"

  // Slow leaf call has no side-effects, sets few values

  set_ctrl(transform( new (C) ProjNode(call,TypeFunc::Control) ));

  // Make memory for the call
  Node* mem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory) );

  // Set the RawPtr memory state only.
  set_memory(mem, adr_idx);

  assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type),
         "call node must be constructed correctly");
}
C:\hotspot-69087d08d473\src\share\vm/opto/idealKit.hpp
/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_IDEALKIT_HPP
#define SHARE_VM_OPTO_IDEALKIT_HPP

#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

//-----------------------------------------------------------------------------
//----------------------------IdealKit-----------------------------------------
// Set of utilities for creating control flow and scalar SSA data flow.
// Control:
//    if_then(left, relop, right)
//    else_ (optional)
//    end_if
//    loop(iv variable, initial, relop, limit)
//       - sets iv to initial for first trip
//       - exits when relation on limit is true
//       - the values of initial and limit should be loop invariant
//       - no increment, must be explicitly coded
//       - final value of iv is available after end_loop (until dead())
//    end_loop
//    make_label(number of gotos)
//    goto_(label)
//    bind(label)
// Data:
//    ConI(integer constant)     - create an integer constant
//    set(variable, value)       - assignment
//    value(variable)            - reference value
//    dead(variable)             - variable's value is no longer live
//    increment(variable, value) - increment variable by value
//    simple operations: AddI, SubI, AndI, LShiftI, etc.
// Example:
//    Node* limit = ??
//    IdealVariable i(kit), j(kit);
//    declarations_done();
//    Node* exit = make_label(1); // 1 goto
//    set(j, ConI(0));
//    loop(i, ConI(0), BoolTest::lt, limit); {
//       if_then(value(i), BoolTest::gt, ConI(5)) {
//         set(j, ConI(1));
//         goto_(exit); dead(i);
//       } end_if();
//       increment(i, ConI(1));
//    } end_loop(); dead(i);
//    bind(exit);
//
// See string_indexOf for a more complete example.

class IdealKit;

// Variable definition for IdealKit
class IdealVariable: public StackObj {
 friend class IdealKit;
 private:
  int _id;
  void set_id(int id) { _id = id; }
 public:
  IdealVariable(IdealKit &k);
  int id() { assert(has_id(),"uninitialized id"); return _id; }
  bool has_id() { return _id >= 0; }
};

class IdealKit: public StackObj {
 friend class IdealVariable;
  // The main state (called a cvstate for Control and Variables)
  // contains both the current values of the variables and the
  // current set of predecessor control edges.  The variable values
  // are managed via a Node [in(1)..in(_var_ct)], and the predecessor
  // control edges managed via a RegionNode. The in(0) of the Node
  // for variables points to the RegionNode for the control edges.
 protected:
  Compile * const C;
  PhaseGVN &_gvn;
  GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
  Node* _cvstate;                          // current cvstate (control, memory and variables)
  uint _var_ct;                            // number of variables
  bool _delay_all_transforms;              // flag forcing all transforms to be delayed
  Node* _initial_ctrl;                     // saves initial control until variables declared
  Node* _initial_memory;                   // saves initial memory  until variables declared
  Node* _initial_i_o;                      // saves initial i_o  until variables declared

  PhaseGVN& gvn() const { return _gvn; }
  // Create a new cvstate filled with nulls
  Node* new_cvstate();                     // Create a new cvstate
  Node* cvstate() { return _cvstate; }     // current cvstate
  Node* copy_cvstate();                    // copy current cvstate

  void set_memory(Node* mem, uint alias_idx );
  void do_memory_merge(Node* merging, Node* join);
  void clear(Node* m);                     // clear a cvstate
  void stop() { clear(_cvstate); }         // clear current cvstate
  Node* delay_transform(Node* n);
  Node* transform(Node* n);                // gvn.transform or skip it
  Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
  bool was_promoted_to_phi(Node* n, Node* reg) {
    return (n->is_Phi() && n->in(0) == reg);
  }
  void declare(IdealVariable* v) { v->set_id(_var_ct++); }
  // This declares the position where vars are kept in the cvstate
  // For some degree of consistency we use the TypeFunc enum to
  // soak up spots in the inputs even though we only use early Control
  // and Memory slots. (So far.)
  static const uint first_var; // = TypeFunc::Parms + 1;

#ifdef ASSERT
  enum State { NullS=0, BlockS=1, LoopS=2, IfThenS=4, ElseS=8, EndifS= 16 };
  GrowableArray<int>* _state;
  State state() { return (State)(_state->top()); }
#endif

  // Users should only care about the merged memory (MergeMem), not about
  // individual slices, so there is no public access to slices.
  Node* memory(uint alias_idx);

 public:
  IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false);
  ~IdealKit() {
    stop();
  }
  void sync_kit(GraphKit* gkit);

  // Control
  Node* ctrl()                          { return _cvstate->in(TypeFunc::Control); }
  void set_ctrl(Node* ctrl)             { _cvstate->set_req(TypeFunc::Control, ctrl); }
  Node* top()                           { return C->top(); }
  MergeMemNode* merged_memory()         { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
  void set_all_memory(Node* mem)        { _cvstate->set_req(TypeFunc::Memory, mem); }
  Node* i_o()                           { return _cvstate->in(TypeFunc::I_O); }
  void set_i_o(Node* c)                 { _cvstate->set_req(TypeFunc::I_O, c); }
  void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
  Node* value(IdealVariable& v)         { return _cvstate->in(first_var + v.id()); }
  void dead(IdealVariable& v)           { set(v, (Node*)NULL); }
  void if_then(Node* left, BoolTest::mask relop, Node* right,
               float prob = PROB_FAIR, float cnt = COUNT_UNKNOWN,
               bool push_new_state = true);
  void else_();
  void end_if();
  void loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit,
            float prob = PROB_LIKELY(0.9), float cnt = COUNT_UNKNOWN);
  void end_loop();
  Node* make_label(int goto_ct);
  void bind(Node* lab);
  void goto_(Node* lab, bool bind = false);
  void declarations_done();

  Node* IfTrue(IfNode* iff)  { return transform(new (C) IfTrueNode(iff)); }
  Node* IfFalse(IfNode* iff) { return transform(new (C) IfFalseNode(iff)); }

  // Data
  Node* ConI(jint k) { return (Node*)gvn().intcon(k); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }

  Node* AddI(Node* l, Node* r) { return transform(new (C) AddINode(l, r)); }
  Node* SubI(Node* l, Node* r) { return transform(new (C) SubINode(l, r)); }
  Node* AndI(Node* l, Node* r) { return transform(new (C) AndINode(l, r)); }
  Node* MaxI(Node* l, Node* r) { return transform(new (C) MaxINode(l, r)); }
  Node* LShiftI(Node* l, Node* r) { return transform(new (C) LShiftINode(l, r)); }
  Node* CmpI(Node* l, Node* r) { return transform(new (C) CmpINode(l, r)); }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return transform(new (C) BoolNode(cmp, relop)); }
  void  increment(IdealVariable& v, Node* j)  { set(v, AddI(value(v), j)); }
  void  decrement(IdealVariable& v, Node* j)  { set(v, SubI(value(v), j)); }

  Node* CmpL(Node* l, Node* r) { return transform(new (C) CmpLNode(l, r)); }

  // TLS
  Node* thread()  {  return gvn().transform(new (C) ThreadLocalNode()); }

  // Pointers

  // A raw address should be transformed regardless of the 'delay_transform'
  // flag, to produce the canonical form CastX2P(offset).
  Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new (C) AddPNode(base, ptr, off)); }

  Node* CmpP(Node* l, Node* r) { return transform(new (C) CmpPNode(l, r)); }
#ifdef _LP64
  Node* XorX(Node* l, Node* r) { return transform(new (C) XorLNode(l, r)); }
#else // _LP64
  Node* XorX(Node* l, Node* r) { return transform(new (C) XorINode(l, r)); }
#endif // _LP64
  Node* URShiftX(Node* l, Node* r) { return transform(new (C) URShiftXNode(l, r)); }
  Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
  Node* CastPX(Node* ctl, Node* p) { return transform(new (C) CastP2XNode(ctl, p)); }

  // Memory operations

  // This is the base version which is given an alias index.
  Node* load(Node* ctl,
             Node* adr,
             const Type* t,
             BasicType bt,
             int adr_idx,
             bool require_atomic_access = false);

  // Return the new StoreXNode
  Node* store(Node* ctl,
              Node* adr,
              Node* val,
              BasicType bt,
              int adr_idx,
              MemNode::MemOrd mo,
              bool require_atomic_access = false,
              bool mismatched = false
              );

  // Store a card mark ordered after store_oop
  Node* storeCM(Node* ctl,
                Node* adr,
                Node* val,
                Node* oop_store,
                int oop_adr_idx,
                BasicType bt,
                int adr_idx);

  // Trivial call
  void make_leaf_call(const TypeFunc *slow_call_type,
                      address slow_call,
                      const char *leaf_name,
                      Node* parm0,
                      Node* parm1 = NULL,
                      Node* parm2 = NULL,
                      Node* parm3 = NULL);

  void make_leaf_call_no_fp(const TypeFunc *slow_call_type,
                            address slow_call,
                            const char *leaf_name,
                            const TypePtr* adr_type,
                            Node* parm0,
                            Node* parm1,
                            Node* parm2,
                            Node* parm3);

};

#endif // SHARE_VM_OPTO_IDEALKIT_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/ifg.cpp
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"

PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}

void PhaseIFG::init( uint maxlrg ) {
  _maxlrg = maxlrg;
  _yanked = new (_arena) VectorSet(_arena);
  _is_square = false;
  // Make uninitialized adjacency lists
  _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  // Also make empty live range structures
  _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  memset(_lrgs,0,sizeof(LRG)*maxlrg);
  // Init all to empty
  for( uint i = 0; i < maxlrg; i++ ) {
    _adjs[i].initialize(maxlrg);
    _lrgs[i].Set_All();
  }
}

// Add edge between vertices a & b.  These are sorted (triangular matrix),
// then the smaller number is inserted in the larger numbered array.
int PhaseIFG::add_edge( uint a, uint b ) {
  lrgs(a).invalid_degree();
  lrgs(b).invalid_degree();
  // Sort a and b, so that a is bigger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].insert( b );
}
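
// Example of the triangular storage (numbers illustrative): add_edge(3, 7)
// swaps its arguments so a == 7, b == 3 and inserts 3 into _adjs[7];
// test_edge(3, 7) performs the same swap, so the edge is found no matter
// which order the vertices are given in.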

// Add an edge between 'a' and everything in the vector.
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  // IFG is triangular, so do the inserts where 'a' < 'b'.
  assert( !_is_square, "only on triangular" );
  IndexSet *adjs_a = &_adjs[a];
  if( !vec->count() ) return;

  IndexSetIterator elements(vec);
  uint neighbor;
  while ((neighbor = elements.next()) != 0) {
    add_edge( a, neighbor );
  }
}

// Is there an edge between a and b?
int PhaseIFG::test_edge( uint a, uint b ) const {
  // Sort a and b, so that a is larger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}

// Convert triangular matrix to square matrix
void PhaseIFG::SquareUp() {
  assert( !_is_square, "only on triangular" );

  // Simple transpose
  for( uint i = 0; i < _maxlrg; i++ ) {
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      _adjs[datum].insert( i );
    }
  }
  _is_square = true;
}
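
// Example (numbers illustrative): if the triangular form recorded only
// 3 in _adjs[7], the transpose above also inserts 7 into _adjs[3], so
// afterwards both adjacency lists mention the edge and per-vertex
// neighbor counts are meaningful everywhere.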

// Compute effective degree in bulk
void PhaseIFG::Compute_Effective_Degree() {
  assert( _is_square, "only on square" );

  for( uint i = 0; i < _maxlrg; i++ )
    lrgs(i).set_degree(effective_degree(i));
}

int PhaseIFG::test_edge_sq( uint a, uint b ) const {
  assert( _is_square, "only on square" );
  // Swap, so that 'a' has the lesser count.  Then binary search is on
  // the smaller of a's list and b's list.
  if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}

// Union edges of B into A
void PhaseIFG::Union( uint a, uint b ) {
  assert( _is_square, "only on square" );
  IndexSet *A = &_adjs[a];
  IndexSetIterator b_elements(&_adjs[b]);
  uint datum;
  while ((datum = b_elements.next()) != 0) {
    if(A->insert(datum)) {
      _adjs[datum].insert(a);
      lrgs(a).invalid_degree();
      lrgs(datum).invalid_degree();
    }
  }
}

// Yank a Node and all connected edges from the IFG.  Return a
// list of neighbors (edges) yanked.
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );
  _yanked->set(a);

  // I remove the LRG from all neighbors.
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  return neighbors(a);
}

// Re-insert a yanked Node.
void PhaseIFG::re_insert( uint a ) {
  assert( _is_square, "only on square" );
  assert( _yanked->test(a), "" );
  (*_yanked) >>= a;

  IndexSetIterator elements(&_adjs[a]);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].insert(a);
    lrgs(datum).invalid_degree();
  }
}

// Compute the degree between 2 live ranges.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int LRG::compute_degree( LRG &l ) const {
  int tmp;
  int num_regs = _num_regs;
  int nregs = l.num_regs();
  tmp =  (_fat_proj || l._fat_proj)     // either is a fat-proj?
    ? (num_regs * nregs)                // then use product
    : MAX2(num_regs,nregs);             // else use max
  return tmp;
}
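
// Worked numbers (illustrative): two aligned-adjacent live ranges of two
// registers each contribute MAX2(2, 2) == 2 to the degree, but if either
// is a fat-proj the contribution becomes the product 2 * 2 == 4,
// reflecting the stricter packing constraint described above.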

// Compute effective degree for this live range.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int PhaseIFG::effective_degree( uint lidx ) const {
  int eff = 0;
  int num_regs = lrgs(lidx).num_regs();
  int fat_proj = lrgs(lidx)._fat_proj;
  IndexSet *s = neighbors(lidx);
  IndexSetIterator elements(s);
  uint nidx;
  while((nidx = elements.next()) != 0) {
    LRG &lrgn = lrgs(nidx);
    int nregs = lrgn.num_regs();
    eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
      ? (num_regs * nregs)              // then use product
      : MAX2(num_regs,nregs);           // else use max
  }
  return eff;
}


#ifndef PRODUCT
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : "  ");
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");

    }
    return;
  }

  // Triangular
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : "  ");
    tty->print("L%d: { ",i);
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}

void PhaseIFG::stats() const {
  ResourceMark rm;
  int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
  uint i;
  for( i = 0; i < _maxlrg; i++ ) {
    h_cnt[neighbor_cnt(i)]++;
  }
  tty->print_cr("--Histogram of counts--");
  for( i = 0; i < _maxlrg*2; i++ )
    if( h_cnt[i] )
      tty->print("%d/%d ",i,h_cnt[i]);
  tty->cr();
}

void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  // IFG is square, sorted and no need for Find
  for( uint i = 0; i < _maxlrg; i++ ) {
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert(idx != i, "Must have empty diagonal");
      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
      assert(_adjs[idx].member(i), "IFG not square");
      assert(!(*_yanked)[idx], "No yanked neighbors");
      assert(last < idx, "not sorted increasing");
      last = idx;
    }
    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
  }
}
#endif

// Interfere this register with everything currently live.  Use the RegMasks
// to trim the set of possible interferences. Return a count of register-only
// interferences as an estimate of register pressure.
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
  uint retval = 0;
  // Interfere with everything live.
  const RegMask &rm = lrgs(r).mask();
  // Check for interference by checking overlap of regmasks.
  // Only interfere if acceptable register masks overlap.
  IndexSetIterator elements(liveout);
  uint l;
  while( (l = elements.next()) != 0 )
    if( rm.overlap( lrgs(l).mask() ) )
      _ifg->add_edge( r, l );
}

// Actually build the interference graph.  Uses virtual registers only, no
// physical register masks.  This allows me to be very aggressive when
// coalescing copies.  Some of this aggressiveness will have to be undone
// later, but I'd rather get all the copies I can now (since unremoved copies
// at this point can end up in bad places).  Copies that are re-inserted
// later have more opportunity to be placed in low-frequency locations.
void PhaseChaitin::build_ifg_virtual( ) {

  // For all blocks (in any order) do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    IndexSet* liveout = _live->live(block);

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis). Just before a Node defines a value
    // (and removes it from the liveness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the liveness set and its inputs are
    // added to the liveness set.
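    //
    // A sketch of one backward step (names illustrative): at "x = y + z"
    // with liveout == {x, w}, we first remove x (its live range ends at
    // its def), make x interfere with the remaining live value w via
    // interfere_with_live, then insert the uses y and z, leaving
    // liveout == {y, z, w} for the instructions above this one.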
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Get value being defined
      uint r = _lrg_map.live_range_id(n);

      // Some special values do not allocate
      if (r) {

        // Remove from live-out set
        liveout->remove(r);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if (idx) {
          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
        }

        // Interfere with everything live
        interfere_with_live(r, liveout);
      }

      // Make all inputs live
      if (!n->is_Phi()) {      // Phi function uses come from prior block
        for(uint k = 1; k < n->req(); k++) {
          liveout->insert(_lrg_map.live_range_id(n->in(k)));
        }
      }

      // 2-address instructions always have the defined value live
      // on entry to the instruction, even though it is being defined
      // by the instruction.  We pretend a virtual copy sits just prior
      // to the instruction and kills the src-def'd register.
      // In other words, for 2-address instructions the defined value
      // interferes with all inputs.
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        // Sometimes my 2-address ADDs are commuted in a bad way.
        // We generally want the USE-DEF register to refer to the
        // loop-varying quantity, to avoid a copy.
        uint op = mach->ideal_Opcode();
        // Check that mach->num_opnds() == 3 to ensure instruction is
        // not subsuming constants, effectively excludes addI_cin_imm
        // Can NOT swap for instructions like addI_cin_imm since it
        // is adding zero to yhi + carry and the second ideal-input
        // points to the result of adding low-halves.
        // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            // See if the ADD is involved in a tight data loop the wrong way
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        // Defined value interferes with all inputs
        uint lidx = _lrg_map.live_range_id(n->in(idx));
        for (uint k = 1; k < n->req(); k++) {
          uint kidx = _lrg_map.live_range_id(n->in(k));
          if (kidx != lidx) {
            _ifg->add_edge(r, kidx);
          }
        }
      }
    } // End of forall instructions in block
  } // End of forall blocks
}
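
//------------------------------------------------------------------------------
// A minimal, self-contained sketch (NOT HotSpot code) of the backward
// liveness/interference scan described above.  Everything here - "Instr",
// plain-int live range ids, the edge-set IFG - is invented for illustration.
// Phis are omitted (their uses belong to predecessor blocks), and each
// instruction defines at most one value.
#include <algorithm>
#include <set>
#include <utility>
#include <vector>

struct Instr {
  int def;                 // live range id defined (0 = none)
  std::vector<int> uses;   // live range ids used
  int copy_src;            // if a copy, its source id; 0 otherwise
};

typedef std::set<std::pair<int, int> > EdgeSet;

static void add_edge(EdgeSet &ifg, int a, int b) {
  if (a != b) ifg.insert(std::make_pair(std::min(a, b), std::max(a, b)));
}

// One reverse pass over a block: start from the live-out set; at each def,
// interfere the defined value with everything still live (after dropping a
// copy's source), remove it, then make the instruction's inputs live.
// Taking 'live' by value mirrors the clone of the liveout set that
// build_ifg_physical does below.
static void build_ifg_block(const std::vector<Instr> &block,
                            std::set<int> live,   // live-out, taken by value
                            EdgeSet &ifg) {
  for (size_t j = block.size(); j > 0; j--) {
    const Instr &n = block[j - 1];
    if (n.def != 0) {
      live.erase(n.def);
      if (n.copy_src != 0) live.erase(n.copy_src);
      for (std::set<int>::iterator it = live.begin(); it != live.end(); ++it) {
        add_edge(ifg, n.def, *it);
      }
    }
    // A copy's source should also appear in uses so it is re-inserted here.
    for (size_t k = 0; k < n.uses.size(); k++) {
      live.insert(n.uses[k]);
    }
  }
}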

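// Count the int register pressure implied by a live set: sum reg_pressure()
// over all up-in-register live ranges that overlap the integer register
// class (SP and flags have no RegI overlap and so are not counted).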
uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        !lrgs(lidx)._is_float &&
        !lrgs(lidx)._is_vector &&
        lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}

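// Count the float/vector register pressure implied by a live set: sum
// reg_pressure() over all up-in-register float and vector live ranges.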
uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        (lrgs(lidx)._is_float || lrgs(lidx)._is_vector))
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}

// Adjust register pressure down by 1.  Capture the last hi-to-low transition.
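// For example (illustrative only): if removing this lrg's contribution takes
// pressure[1] from above FLOATPRESSURE down to exactly FLOATPRESSURE at node
// index 'where', that index is recorded in hrp_index[1] as the most recent
// high-to-low float-pressure transition seen by the scan.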
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  if (lrg->mask().is_UP() && lrg->mask_size()) {
    if (lrg->_is_float || lrg->_is_vector) {
      pressure[1] -= lrg->reg_pressure();
      if( pressure[1] == (uint)FLOATPRESSURE ) {
        hrp_index[1] = where;
        if( pressure[1] > b->_freg_pressure )
          b->_freg_pressure = pressure[1]+1;
      }
    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
      pressure[0] -= lrg->reg_pressure();
      if( pressure[0] == (uint)INTPRESSURE   ) {
        hrp_index[0] = where;
        if( pressure[0] > b->_reg_pressure )
          b->_reg_pressure = pressure[0]+1;
      }
    }
  }
}

// Build the interference graph using physical registers when available.
// That is, if 2 live ranges are simultaneously alive but in their acceptable
// register sets do not overlap, then they do not interfere.
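// For example (illustrative only): a live range confined to integer
// registers and one confined to float registers can be live at the same
// time without interfering, since no single physical register could ever
// be chosen for both.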
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )

  uint must_spill = 0;

  // For all blocks (in any order) do...
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    // Clone (rather than smash in place) the liveout info, so it is alive
    // for the "collect_gc_info" phase later.
    IndexSet liveout(_live->live(block));
    uint last_inst = block->end_idx();
    // Compute first nonphi node index
    uint first_inst;
    for (first_inst = 1; first_inst < last_inst; first_inst++) {
      if (!block->get_node(first_inst)->is_Phi()) {
        break;
      }
    }

    // Spills could be inserted before CreateEx node which should be
    // first instruction in block after Phis. Move CreateEx up.
    for (uint insidx = first_inst; insidx < last_inst; insidx++) {
      Node *ex = block->get_node(insidx);
      if (ex->is_SpillCopy()) {
        continue;
      }
      if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // If the CreateEx isn't above all the MachSpillCopies
        // then move it to the top.
        block->remove_node(insidx);
        block->insert_node(ex, first_inst);
      }
      // Stop once a CreateEx or any other node is found
      break;
    }

    // Reset block's register pressure values for each ifg construction
    uint pressure[2], hrp_index[2];
    pressure[0] = pressure[1] = 0;
    hrp_index[0] = hrp_index[1] = last_inst+1;
    block->_reg_pressure = block->_freg_pressure = 0;
    // Liveout things are presumed live for the whole block.  We accumulate
    // 'area' accordingly.  If they get killed in the block, we'll subtract
    // the unused part of the block from the area.
    int inst_count = last_inst - first_inst;
    double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
    assert(!(cost < 0.0), "negative spill cost" );
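    // For example (illustrative only): with block->_freq == 2.0 and
    // inst_count == 8, each live-out live range is charged 16.0 of area
    // here; a live range defined inside the block gets area handed back
    // at its def (see the DEF handling in the scan below).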
    IndexSetIterator elements(&liveout);
    uint lidx;
    while ((lidx = elements.next()) != 0) {
      LRG &lrg = lrgs(lidx);
      lrg._area += cost;
      // Compute initial register pressure
      if (lrg.mask().is_UP() && lrg.mask_size()) {
        if (lrg._is_float || lrg._is_vector) {   // Count float pressure
          pressure[1] += lrg.reg_pressure();
          if (pressure[1] > block->_freg_pressure) {
            block->_freg_pressure = pressure[1];
          }
          // Count int pressure, but do not count the SP, flags
        } else if (lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
          pressure[0] += lrg.reg_pressure();
          if (pressure[0] > block->_reg_pressure) {
            block->_reg_pressure = pressure[0];
          }
        }
      }
    }
    assert( pressure[0] == count_int_pressure  (&liveout), "" );
    assert( pressure[1] == count_float_pressure(&liveout), "" );

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis).  Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the live-ness set and its inputs are added
    // to the live-ness set.
    uint j;
    for (j = last_inst + 1; j > 1; j--) {
      Node* n = block->get_node(j - 1);

      // Get value being defined
      uint r = _lrg_map.live_range_id(n);

      // Some special values do not allocate
      if(r) {
        // A DEF normally costs block frequency; rematerialized values are
        // removed from the DEF site, so LOWER costs here.
        lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;

        // If it is not live, then this instruction is dead.  Probably caused
        // by spilling and rematerialization.  Who cares why, yank this baby.
        if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
          Node *def = n->in(0);
          if( !n->is_Proj() ||
              // Could also be a flags-projection of a dead ADD or such.
              (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
            bool remove = true;
            if (n->is_MachProj()) {
              // Don't remove KILL projections if their "defining" nodes have
              // memory effects (i.e. have an SCMemProj projection node) -
              // they are not dead even when their result is not used.
              // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
              // The method add_input_to_liveout() keeps such nodes alive (puts them
              // on the liveout list) when it sees an SCMemProj node in a block.
              // Unfortunately, the SCMemProj node could be placed in the block in
              // such an order that the KILL MachProj nodes are processed first.
              uint cnt = def->outcnt();
              for (uint i = 0; i < cnt; i++) {
                Node* proj = def->raw_out(i);
                if (proj->Opcode() == Op_SCMemProj) {
                  remove = false;
                  break;
                }
              }
            }
            if (remove) {
              block->remove_node(j - 1);
              if (lrgs(r)._def == n) {
                lrgs(r)._def = 0;
              }
              n->disconnect_inputs(NULL, C);
              _cfg.unmap_node_from_block(n);
              n->replace_by(C->top());
              // Since yanking a Node from block, high pressure moves up one
              hrp_index[0]--;
              hrp_index[1]--;
              continue;
            }
          }

          // Fat-projections kill many registers which cannot be used to
          // hold live ranges.
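          // For example (illustrative only): the kill projection of a call
          // covers many registers at once, so it is charged the size of its
          // mask's overlap with the register class rather than a single
          // register's worth of pressure.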
          if (lrgs(r)._fat_proj) {
            // Count the int-only registers
            RegMask itmp = lrgs(r).mask();
            itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
            int iregs = itmp.Size();
            if (pressure[0]+iregs > block->_reg_pressure) {
              block->_reg_pressure = pressure[0] + iregs;
            }
            if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
              hrp_index[0] = j - 1;
            }
            // Count the float-only registers
            RegMask ftmp = lrgs(r).mask();
            ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
            int fregs = ftmp.Size();
            if (pressure[1] + fregs > block->_freg_pressure) {
              block->_freg_pressure = pressure[1] + fregs;
            }
            if(pressure[1] <= (uint)FLOATPRESSURE && pressure[1]+fregs > (uint)FLOATPRESSURE) {
              hrp_index[1] = j - 1;
            }
          }

        } else {                // Else it is live
          // A DEF also ends 'area' partway through the block.
          lrgs(r)._area -= cost;
          assert(!(lrgs(r)._area < 0.0), "negative spill area" );
