C:\hotspot-69087d08d473\src\share\vm/opto/machnode.cpp


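//------------------------------memory_inputs---------------------------------
// Return this node's memory operand, if it has exactly one, and set
// base/index to the input nodes it uses.  A result of (MachOper*)-1 with
// base/index set to NodeSentinel means the node has more than one memory
// input; NULL means it has none.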
const MachOper*  MachNode::memory_inputs(Node* &base, Node* &index) const {
  const MachOper* oper = memory_operand();
  if (oper == (MachOper*)-1) {
    base = NodeSentinel;
    index = NodeSentinel;
  } else {
    base = NULL;
    index = NULL;
    if (oper != NULL) {
      int oper_idx = num_opnds();
      while (--oper_idx >= 0) {
        if (_opnds[oper_idx] == oper)  break;
      }
      int oper_pos = operand_index(oper_idx);
      int base_pos = oper->base_position();
      if (base_pos >= 0) {
        base = _in[oper_pos+base_pos];
      } 
      int index_pos = oper->index_position();
      if (index_pos >= 0) {
        index = _in[oper_pos+index_pos];
      }
    }
  }
  return oper;
}
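//------------------------------get_base_and_disp-----------------------------
// Decompose this node's unique memory reference into a base node and a
// constant offset, setting adr_type directly in the cases that can be
// decided here.  Returns the base, NULL when there is no base (the offset
// may still be nonzero), or NodeSentinel when there is no unique memory
// reference; offset is Type::OffsetBot when it cannot be determined.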
const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_type) const {
  Node* base;
  Node* index;
  const MachOper* oper = memory_inputs(base, index);
  if (oper == NULL) {
    offset = 0;
  } else if (oper == (MachOper*)-1) {
    offset = Type::OffsetBot;
  } else {
    intptr_t disp = oper->constant_disp();
    int scale = oper->scale();
    if (index != NULL) {
      const Type* t_index = index->bottom_type();
      if (t_index->isa_narrowoop() || t_index->isa_narrowklass()) { // EncodeN, LoadN, LoadConN, LoadNKlass,
        assert(base == NULL, "Memory references through narrow oops have no base");
        offset = disp;
        adr_type = t_index->make_ptr()->add_offset(offset);
        return NULL;
      } else if (!index->is_Con()) {
        disp = Type::OffsetBot;
      } else if (disp != Type::OffsetBot) {
        const TypeX* ti = t_index->isa_intptr_t();
        if (ti == NULL) {
          disp = Type::OffsetBot;  // a random constant??
        } else {
          disp += ti->get_con() << scale;
        }
      }
    }
    offset = disp;
    if( adr_type == TYPE_PTR_SENTINAL ) {
      const TypePtr *t_disp = oper->disp_as_type();  // only !NULL for indOffset32X
      if (t_disp != NULL) {
        offset = Type::OffsetBot;
        const Type* t_base = base->bottom_type();
        if (t_base->isa_intptr_t()) {
          const TypeX *t_offset = t_base->is_intptr_t();
          if( t_offset->is_con() ) {
            offset = t_offset->get_con();
          }
        }
        adr_type = t_disp->add_offset(offset);
      } else if( base == NULL && offset != 0 && offset != Type::OffsetBot ) {
        const TypePtr *tp = oper->type()->isa_ptr();
        if( tp != NULL) {
          adr_type = tp;
        }
      }
    }
  }
  return base;
}
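//------------------------------adr_type---------------------------------------
// Compute the memory slice this node touches, from the base and offset
// found above.  NULL means the node makes no memory reference at all;
// TypePtr::BOTTOM means it may touch all of memory.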
const class TypePtr *MachNode::adr_type() const {
  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // attempt computing adr_type
  const Node *base = get_base_and_disp(offset, adr_type);
  if( adr_type != TYPE_PTR_SENTINAL ) {
    return adr_type;      // get_base_and_disp has the answer
  }
  if (base == NULL) {
    if (offset == 0) {
      return NULL;
    }
    if (offset == Type::OffsetBot) {
      return TypePtr::BOTTOM;
    }
    assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr");
    return TypeRawPtr::BOTTOM;
  }
  if (base == NodeSentinel)  return TypePtr::BOTTOM;
  const Type* t = base->bottom_type();
  if (t->isa_narrowoop() && Universe::narrow_oop_shift() == 0) {
    t = t->make_ptr();
  }
  if (t->isa_narrowklass() && Universe::narrow_klass_shift() == 0) {
    t = t->make_ptr();
  }
  if (t->isa_intptr_t() && offset != 0 && offset != Type::OffsetBot) {
    return TypeRawPtr::BOTTOM;
  }
  const TypePtr *tp = t->isa_ptr();
  if (tp == NULL) {
    assert(false, "this path may produce suboptimal code");
    return TypePtr::BOTTOM;
  }
  assert(tp->base() != Type::AnyPtr, "not a bare pointer");
  return tp->add_offset(offset);
}
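//------------------------------operand_index---------------------------------
// Map an operand number to the index of its first input edge, by summing
// the edge counts of the preceding operands on top of oper_input_base().
// Returns -1 for operand 0 (the result) and for operands with no edges;
// the second form does the same lookup for a MachOper pointer.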
int MachNode::operand_index( uint operand ) const {
  if( operand < 1 )  return -1;
  assert(operand < num_opnds(), "oob");
  if( _opnds[operand]->num_edges() == 0 )  return -1;
  uint skipped   = oper_input_base(); // Sum of leaves skipped so far
  for (uint opcnt = 1; opcnt < operand; opcnt++) {
    uint num_edges = _opnds[opcnt]->num_edges(); // leaves for operand
    skipped += num_edges;
  }
  return skipped;
}
int MachNode::operand_index(const MachOper *oper) const {
  uint skipped = oper_input_base(); // Sum of leaves skipped so far
  uint opcnt;
  for (opcnt = 1; opcnt < num_opnds(); opcnt++) {
    if (_opnds[opcnt] == oper) break;
    uint num_edges = _opnds[opcnt]->num_edges(); // leaves for operand
    skipped += num_edges;
  }
  if (_opnds[opcnt] != oper) return -1;
  return skipped;
}
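//------------------------------peephole---------------------------------------
// Apply peephole rule(s) to this instruction; the default does nothing.
// Matched peephole rules are generated into subclasses by the ADLC.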
MachNode *MachNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {
  return NULL;
}
void MachNode::add_case_label( int index_num, Label* blockLabel) {
  ShouldNotCallThis();
}
void MachNode::method_set( intptr_t addr ) {
  ShouldNotCallThis();
}
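//------------------------------rematerialize---------------------------------
// Heuristic: can this node be cheaply recomputed at its uses instead of
// being spilled?  Temps, flag-setting nodes and rules in the matcher's
// rematerialize range qualify; two-address nodes, (optionally) float/double
// constants, nodes with extra inputs, or nodes whose input is bound to a
// single register do not.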
bool MachNode::rematerialize() const {
  if (is_MachTemp()) return true;
  uint r = rule();              // Match rule
  if( r <  Matcher::_begin_rematerialize ||
      r >= Matcher::_end_rematerialize )
    return false;
  if( two_adr() )  return false;
  if( !Matcher::rematerialize_float_constants ) {
    int op = ideal_Opcode();
    if( op == Op_ConF || op == Op_ConD )
      return false;
  }
  if( ideal_reg() == Op_RegFlags )
    return true;
  if( req() > 2 )
    return false;
  uint idx = oper_input_base();
  if (req() > idx) {
    const RegMask &rm = in_RegMask(idx);
    if (rm.is_bound(ideal_reg()))
      return false;
  }
  return true;
}
#ifndef PRODUCT
void MachNode::dump_spec(outputStream *st) const {
  uint cnt = num_opnds();
  for( uint i=0; i<cnt; i++ )
    _opnds[i]->dump_spec(st);
  const TypePtr *t = adr_type();
  if( t ) {
    Compile* C = Compile::current();
    if( C->alias_type(t)->is_volatile() )
      st->print(" Volatile!");
  }
}
void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const {
  format(ra, st); // access to virtual
}
#endif
#ifndef PRODUCT
void MachTypeNode::dump_spec(outputStream *st) const {
  _bottom_type->dump_on(st);
}
#endif
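//------------------------------constant_offset-------------------------------
// Compute (and cache) this node's offset into the shared constant table.
// During the scratch-buffer sizing pass the table base offset is only an
// estimate, so the result is returned without being cached.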
int MachConstantNode::constant_offset() {
  if (_constant.offset() == -1) {
    Compile::ConstantTable& constant_table = Compile::current()->constant_table();
    int offset = constant_table.find_offset(_constant);
    if (Compile::current()->in_scratch_emit_size()) {
      return constant_table.calculate_table_base_offset() + offset;
    }
    _constant.set_offset(constant_table.table_base_offset() + offset);
  }
  return _constant.offset();
}
int MachConstantNode::constant_offset_unchecked() const {
  return _constant.offset();
}
#ifndef PRODUCT
void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int reg = ra_->get_reg_first(in(1)->in(_vidx));
  st->print("%s %s", Name(), Matcher::regName[reg]);
}
#endif
void MachNullCheckNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
}
void MachNullCheckNode::label_set(Label* label, uint block_num) {
}
void MachNullCheckNode::save_label( Label** label, uint* block_num ) {
}
const RegMask &MachNullCheckNode::in_RegMask( uint idx ) const {
  if( idx == 0 ) return RegMask::Empty;
  else return in(1)->as_Mach()->out_RegMask();
}
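//------------------------------MachProjNode----------------------------------
// The bottom type of a projection is the corresponding field of the tuple
// produced by its input, except for fat projections, which kill many
// registers and therefore type as BOTTOM.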
const Type *MachProjNode::bottom_type() const {
  if( _ideal_reg == fat_proj ) return Type::BOTTOM;
  const Type *t = in(0)->bottom_type();
  if( t->base() == Type::Tuple ) {
    const TypeTuple *tt = t->is_tuple();
    if (_con < tt->cnt())
      return tt->field_at(_con);
  }
  assert((uint)_ideal_reg < (uint)_last_machine_leaf && Type::mreg2type[_ideal_reg], "in bounds");
  return Type::mreg2type[_ideal_reg];
}
const TypePtr *MachProjNode::adr_type() const {
  if (bottom_type() == Type::MEMORY) {
    const TypePtr* adr_type = in(0)->adr_type();
    #ifdef ASSERT
    if (!is_error_reported() && !Node::in_dump())
      assert(adr_type != NULL, "source must have adr_type");
    #endif
    return adr_type;
  }
  assert(bottom_type()->base() != Type::Memory, "no other memories?");
  return NULL;
}
#ifndef PRODUCT
void MachProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  switch (_ideal_reg) {
  case unmatched_proj:  st->print("/unmatched");                         break;
  case fat_proj:        st->print("/fat"); if (WizardMode) _rout.dump(); break;
  }
}
#endif
#ifndef PRODUCT
void MachIfNode::dump_spec(outputStream *st) const {
  st->print("P=%f, C=%f",_prob, _fcnt);
}
#endif
uint MachReturnNode::size_of() const { return sizeof(*this); }
const RegMask &MachReturnNode::in_RegMask( uint idx ) const {
  return _in_rms[idx];
}
const TypePtr *MachReturnNode::adr_type() const {
  return _adr_type;
}
const Type *MachSafePointNode::bottom_type() const {  return TypeTuple::MEMBAR; }
const RegMask &MachSafePointNode::in_RegMask( uint idx ) const {
  if( idx < TypeFunc::Parms ) return _in_rms[idx];
  if (SafePointNode::needs_polling_address_input() &&
      idx == TypeFunc::Parms &&
      ideal_Opcode() == Op_SafePoint) {
    return MachNode::in_RegMask(idx);
  }
  assert(in(idx)->ideal_reg() != Op_RegFlags, "flags register is not spillable");
  return *Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()];
}
uint MachCallNode::cmp( const Node &n ) const
{ return _tf == ((MachCallNode&)n)._tf; }
const Type *MachCallNode::bottom_type() const { return tf()->range(); }
const Type *MachCallNode::Value(PhaseTransform *phase) const { return tf()->range(); }
#ifndef PRODUCT
void MachCallNode::dump_spec(outputStream *st) const {
  st->print("# ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif
bool MachCallNode::return_value_is_used() const {
  if (tf()->range()->cnt() == TypeFunc::Parms) {
    return false;
  }
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node *use = fast_out(i);
    if (!use->is_Proj()) continue;
    if (use->as_Proj()->_con == TypeFunc::Parms) {
      return true;
    }
  }
  return false;
}
bool MachCallNode::returns_pointer() const {
  const TypeTuple *r = tf()->range();
  return (r->cnt() > TypeFunc::Parms &&
          r->field_at(TypeFunc::Parms)->isa_ptr());
}
const RegMask &MachCallNode::in_RegMask(uint idx) const {
  if (idx < tf()->domain()->cnt()) {
    return _in_rms[idx];
  }
  if (idx == mach_constant_base_node_input()) {
    return MachConstantBaseNode::static_out_RegMask();
  }
  return *Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()];
}
uint MachCallJavaNode::size_of() const { return sizeof(*this); }
uint MachCallJavaNode::cmp( const Node &n ) const {
  MachCallJavaNode &call = (MachCallJavaNode&)n;
  return MachCallNode::cmp(call) && _method->equals(call._method);
}
#ifndef PRODUCT
void MachCallJavaNode::dump_spec(outputStream *st) const {
  if (_method_handle_invoke)
    st->print("MethodHandle ");
  if (_method) {
    _method->print_short_name(st);
    st->print(" ");
  }
  MachCallNode::dump_spec(st);
}
#endif
const RegMask &MachCallJavaNode::in_RegMask(uint idx) const {
  if (idx < tf()->domain()->cnt()) {
    return _in_rms[idx];
  }
  if (idx == mach_constant_base_node_input()) {
    return MachConstantBaseNode::static_out_RegMask();
  }
  Matcher* m = Compile::current()->matcher();
  RegMask** debugmask = _method_handle_invoke ? m->idealreg2mhdebugmask : m->idealreg2debugmask;
  return *debugmask[in(idx)->ideal_reg()];
}
uint MachCallStaticJavaNode::size_of() const { return sizeof(*this); }
uint MachCallStaticJavaNode::cmp( const Node &n ) const {
  MachCallStaticJavaNode &call = (MachCallStaticJavaNode&)n;
  return MachCallJavaNode::cmp(call) && _name == call._name;
}
int MachCallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return CallStaticJavaNode::extract_uncommon_trap_request(this);
  }
  return 0;
}
#ifndef PRODUCT
void MachCallStaticJavaNode::dump_trap_args(outputStream *st) const {
  int trap_req = uncommon_trap_request();
  if (trap_req != 0) {
    char buf[100];
    st->print("(%s)",
               Deoptimization::format_trap_request(buf, sizeof(buf),
                                                   trap_req));
  }
}
void MachCallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("Static ");
  if (_name != NULL) {
    st->print("wrapper for: %s", _name );
    dump_trap_args(st);
    st->print(" ");
  }
  MachCallJavaNode::dump_spec(st);
}
#endif
#ifndef PRODUCT
void MachCallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("Dynamic ");
  MachCallJavaNode::dump_spec(st);
}
#endif
uint MachCallRuntimeNode::size_of() const { return sizeof(*this); }
uint MachCallRuntimeNode::cmp( const Node &n ) const {
  MachCallRuntimeNode &call = (MachCallRuntimeNode&)n;
  return MachCallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void MachCallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("%s ",_name);
  MachCallNode::dump_spec(st);
}
#endif
JVMState jvms_for_throw(0);
JVMState *MachHaltNode::jvms() const {
  return &jvms_for_throw;
}
#ifndef PRODUCT
void labelOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {
  st->print("B%d", _block_num);
}
#endif // PRODUCT
#ifndef PRODUCT
void methodOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const {
  st->print(INTPTR_FORMAT, _method);
}
#endif // PRODUCT
C:\hotspot-69087d08d473\src\share\vm/opto/machnode.hpp
#ifndef SHARE_VM_OPTO_MACHNODE_HPP
#define SHARE_VM_OPTO_MACHNODE_HPP
#include "opto/callnode.hpp"
#include "opto/matcher.hpp"
#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/regmask.hpp"
class BiasedLockingCounters;
class BufferBlob;
class CodeBuffer;
class JVMState;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachEpilogNode;
class MachIfNode;
class MachNullCheckNode;
class MachOper;
class MachProjNode;
class MachPrologNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class Matcher;
class PhaseRegAlloc;
class RegMask;
class RTMLockingCounters;
class State;
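//---------------------------MachOper-----------------------------------------
// The machine-dependent operand class.  Operands describe the registers,
// constants and memory addressing expressions used by a MachNode; most of
// the virtual accessors below are overridden by ADLC-generated subclasses.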
class MachOper : public ResourceObj {
public:
  void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
  virtual uint opcode() const = 0;
  virtual uint num_edges() const { return 1; }
  virtual const RegMask *in_RegMask(int index) const;
  virtual void negate();
  virtual int  reg(PhaseRegAlloc *ra_, const Node *node)   const;
  virtual int  reg(PhaseRegAlloc *ra_, const Node *node, int idx)   const;
  Register  as_Register(PhaseRegAlloc *ra_, const Node *node)   const {
    return ::as_Register(reg(ra_, node));
  }
  Register  as_Register(PhaseRegAlloc *ra_, const Node *node, int idx)   const {
    return ::as_Register(reg(ra_, node, idx));
  }
  FloatRegister  as_FloatRegister(PhaseRegAlloc *ra_, const Node *node)   const {
    return ::as_FloatRegister(reg(ra_, node));
  }
  FloatRegister  as_FloatRegister(PhaseRegAlloc *ra_, const Node *node, int idx)   const {
    return ::as_FloatRegister(reg(ra_, node, idx));
  }
#if defined(IA32) || defined(AMD64)
  XMMRegister  as_XMMRegister(PhaseRegAlloc *ra_, const Node *node)   const {
    return ::as_XMMRegister(reg(ra_, node));
  }
  XMMRegister  as_XMMRegister(PhaseRegAlloc *ra_, const Node *node, int idx)   const {
    return ::as_XMMRegister(reg(ra_, node, idx));
  }
#endif
#if defined(PPC64)
  ConditionRegister as_ConditionRegister(PhaseRegAlloc *ra_, const Node *node) const {
    return ::as_ConditionRegister(reg(ra_, node));
  }
  ConditionRegister as_ConditionRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
    return ::as_ConditionRegister(reg(ra_, node, idx));
  }
#endif
  virtual intptr_t  constant() const;
  virtual relocInfo::relocType constant_reloc() const;
  virtual jdouble constantD() const;
  virtual jfloat  constantF() const;
  virtual jlong   constantL() const;
  virtual TypeOopPtr *oop() const;
  virtual int  ccode() const;
  virtual int  base (PhaseRegAlloc *ra_, const Node *node, int idx) const;
  virtual int  index(PhaseRegAlloc *ra_, const Node *node, int idx) const;
  virtual int  scale() const;
  virtual int  disp (PhaseRegAlloc *ra_, const Node *node, int idx) const;
  virtual relocInfo::relocType disp_reloc() const;
  virtual int  constant_disp() const;   // usu. 0, may return Type::OffsetBot
  virtual int  base_position()  const;  // base edge position, or -1
  virtual int  index_position() const;  // index edge position, or -1
  virtual const TypePtr *disp_as_type() const { return NULL; }
  virtual Label *label() const;
  virtual intptr_t  method() const;
  virtual uint  hash() const;
  virtual uint  cmp( const MachOper &oper ) const;
  virtual MachOper *clone(Compile* C) const = 0;
  virtual const Type *type() const;
  virtual void set_con( jint c0 ) { ShouldNotReachHere();  }
#ifndef PRODUCT
  virtual const char    *Name() const { return "???";}
  virtual void int_format(PhaseRegAlloc *,const MachNode *node, outputStream *st) const = 0;
  virtual void ext_format(PhaseRegAlloc *,const MachNode *node,int idx, outputStream *st) const=0;
  virtual void dump_spec(outputStream *st) const; // Print per-operand info
  static bool notAnOper(const MachOper *o) {
    if (o == NULL)                   return true;
    if (((intptr_t)o & 1) != 0)      return true;
    if (*(address*)o == badAddress)  return true;  // kill by Node::destruct
    return false;
  }
#endif // !PRODUCT
};
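//---------------------------MachNode-----------------------------------------
// Base class for all machine-specific nodes produced by the matcher.  Each
// node carries an array of MachOpers; operand 0 describes the result, and
// machine inputs begin at oper_input_base().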
class MachNode : public Node {
public:
  MachNode() : Node((uint)0), _num_opnds(0), _opnds(NULL) {
    init_class_id(Class_Mach);
  }
  virtual uint size_of() const { return sizeof(MachNode); }
  virtual int  Opcode() const;          // Always equal to MachNode
  virtual uint rule() const = 0;        // Machine-specific opcode
  virtual uint oper_input_base() const { return 1; }
  virtual uint mach_constant_base_node_input() const { return (uint)-1; }
  void fill_new_machnode(MachNode *n, Compile* C) const;
  virtual MachNode *cisc_version(int offset, Compile* C);
  virtual void use_cisc_RegMask();
  bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }
  enum AvoidBackToBackFlag { AVOID_NONE = 0,
                             AVOID_BEFORE = Flag_avoid_back_to_back_before,
                             AVOID_AFTER = Flag_avoid_back_to_back_after,
                             AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER };
  bool avoid_back_to_back(AvoidBackToBackFlag flag_value) const {
    return (flags() & flag_value) == flag_value;
  }
  bool has_call() const { return (flags() & Flag_has_call) != 0; }
  int  operand_index(uint operand) const;
  int  operand_index(const MachOper *oper) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask *cisc_RegMask() const { return NULL; }
  virtual uint two_adr( ) const { return 0; }
  uint _num_opnds;
  MachOper **_opnds;
  uint  num_opnds() const { return _num_opnds; }
  virtual void  emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual bool requires_postalloc_expand() const { return false; }
  virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);
  virtual uint  size(PhaseRegAlloc *ra_) const;
  virtual uint  emit_size(PhaseRegAlloc *ra_) const;
  virtual int   alignment_required() const { return 1; }
  virtual int   compute_padding(int current_offset) const { return 0; }
  virtual int   reloc() const { return 0; }
  virtual int   ins_num_consts() const { return 0; }
  virtual uint  hash() const;
  virtual uint  cmp( const Node &n ) const;
  virtual MachNode *Expand( State *, Node_List &proj_list, Node* mem ) { return this; }
  virtual const class Type *bottom_type() const { return _opnds[0]->type(); }
  virtual uint ideal_reg() const { const Type *t = _opnds[0]->type(); return t == TypeInt::CC ? Op_RegFlags : t->ideal_reg(); }
  #define TYPE_PTR_SENTINAL  ((const TypePtr*)-1)
  const Node* get_base_and_disp(intptr_t &offset, const TypePtr* &adr_type) const;
  const MachOper* memory_inputs(Node* &base, Node* &index) const;
  virtual const MachOper* memory_operand() const { return NULL; }
  virtual const class TypePtr *adr_type() const;
  virtual MachNode *peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C );
  virtual int ideal_Opcode()     const { return Op_Node; }
  virtual void add_case_label( int switch_val, Label* blockLabel);
  virtual void method_set( intptr_t addr );
  bool rematerialize() const;
  static const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;
  virtual bool is_TrapBasedCheckNode() const { return false; }
#ifndef PRODUCT
  virtual const char *Name() const = 0; // Machine-specific name
  virtual void dump_spec(outputStream *st) const; // Print per-node info
  void         dump_format(PhaseRegAlloc *ra, outputStream *st) const; // access to virtual
#endif
};
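//---------------------------MachIdealNode------------------------------------
// Machine-specific versions of ideal nodes that need no operand array and
// no match rule: prologs, epilogs, nops, spill copies and the like.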
class MachIdealNode : public MachNode {
public:
  MachIdealNode( ) {}
  virtual uint oper_input_base() const { return 0; }
  virtual uint rule()            const { return 9999999; }
  virtual const class Type *bottom_type() const { return _opnds == NULL ? Type::CONTROL : MachNode::bottom_type(); }
};
class MachTypeNode : public MachNode {
  virtual uint size_of() const { return sizeof(*this); } // Size is bigger
public:
  MachTypeNode( ) {}
  const Type *_bottom_type;
  virtual const class Type *bottom_type() const { return _bottom_type; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachBreakpointNode : public MachIdealNode {
public:
  MachBreakpointNode( ) {}
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
#ifndef PRODUCT
  virtual const char *Name() const { return "Breakpoint"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
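//---------------------------MachConstantBaseNode-----------------------------
// Materializes the base address of the compilation's shared constant table;
// every MachConstantNode takes it as an input.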
class MachConstantBaseNode : public MachIdealNode {
public:
  static const RegMask& _out_RegMask;  // We need the out_RegMask statically in MachConstantNode::in_RegMask().
public:
  MachConstantBaseNode() : MachIdealNode() {
    init_class_id(Class_MachConstantBase);
  }
  virtual const class Type* bottom_type() const { return TypeRawPtr::NOTNULL; }
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual uint oper_input_base() const { return 1; }
  virtual bool requires_postalloc_expand() const;
  virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);
  virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const;
  virtual uint size(PhaseRegAlloc* ra_) const;
  virtual bool pinned() const { return UseRDPCForConstantTableBase; }
  static const RegMask& static_out_RegMask() { return _out_RegMask; }
  virtual const RegMask& out_RegMask() const { return static_out_RegMask(); }
#ifndef PRODUCT
  virtual const char* Name() const { return "MachConstantBaseNode"; }
  virtual void format(PhaseRegAlloc*, outputStream* st) const;
#endif
};
class MachConstantNode : public MachTypeNode {
protected:
  Compile::Constant _constant;  // This node's constant.
public:
  MachConstantNode() : MachTypeNode() {
    init_class_id(Class_MachConstant);
  }
  virtual void eval_constant(Compile* C) {
#ifdef ASSERT
    tty->print("missing MachConstantNode eval_constant function: ");
    dump();
#endif
    ShouldNotCallThis();
  }
  virtual const RegMask &in_RegMask(uint idx) const {
    if (idx == mach_constant_base_node_input())
      return MachConstantBaseNode::static_out_RegMask();
    return MachNode::in_RegMask(idx);
  }
  virtual uint mach_constant_base_node_input() const { return req() - 1; }
  int  constant_offset();
  int  constant_offset() const { return ((MachConstantNode*) this)->constant_offset(); }
  int  constant_offset_unchecked() const;
};
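//---------------------------MachUEPNode--------------------------------------
// The Un-validated Entry Point of a method: performs the inline-cache klass
// check before falling into the verified entry point.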
class MachUEPNode : public MachIdealNode {
public:
  MachUEPNode( ) {}
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
#ifndef PRODUCT
  virtual const char *Name() const { return "Unvalidated-Entry-Point"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
class MachPrologNode : public MachIdealNode {
public:
  MachPrologNode( ) {}
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
  virtual int reloc() const;
#ifndef PRODUCT
  virtual const char *Name() const { return "Prolog"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
class MachEpilogNode : public MachIdealNode {
public:
  MachEpilogNode(bool do_poll = false) : _do_polling(do_poll) {}
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
  virtual int reloc() const;
  virtual const Pipeline *pipeline() const;
private:
  bool _do_polling;
public:
  bool do_polling() const { return _do_polling; }
  int safepoint_offset() const;
#ifndef PRODUCT
  virtual const char *Name() const { return "Epilog"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
class MachNopNode : public MachIdealNode {
private:
  int _count;
public:
  MachNopNode( ) : _count(1) {}
  MachNopNode( int count ) : _count(count) {}
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
  virtual const class Type *bottom_type() const { return Type::CONTROL; }
  virtual int ideal_Opcode() const { return Op_Con; } // bogus; see output.cpp
  virtual const Pipeline *pipeline() const;
#ifndef PRODUCT
  virtual const char *Name() const { return "Nop"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
  virtual void dump_spec(outputStream *st) const { } // No per-operand info
#endif
};
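//---------------------------MachSpillCopyNode--------------------------------
// A register-to-register or register-to-stack move inserted by the register
// allocator; its input and output register masks can be narrowed as the
// allocator refines its decisions.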
class MachSpillCopyNode : public MachIdealNode {
  const RegMask *_in;           // RegMask for input
  const RegMask *_out;          // RegMask for output
  const Type *_type;
public:
  MachSpillCopyNode( Node *n, const RegMask &in, const RegMask &out ) :
    MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()) {
    init_class_id(Class_MachSpillCopy);
    init_flags(Flag_is_Copy);
    add_req(NULL);
    add_req(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  void set_out_RegMask(const RegMask &out) { _out = &out; }
  void set_in_RegMask(const RegMask &in) { _in = &in; }
  virtual const RegMask &out_RegMask() const { return *_out; }
  virtual const RegMask &in_RegMask(uint) const { return *_in; }
  virtual const class Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const { return _type->ideal_reg(); }
  virtual uint oper_input_base() const { return 1; }
  uint implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const;
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
#ifndef PRODUCT
  virtual const char *Name() const { return "MachSpillCopy"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
class MachMergeNode : public MachIdealNode {
public:
  MachMergeNode(Node *n1) {
    init_class_id(Class_MachMerge);
    add_req(NULL);
    add_req(n1);
  }
  virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
  virtual const RegMask &in_RegMask(uint idx) const { return in(1)->in_RegMask(idx); }
  virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
  virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
  virtual uint oper_input_base() const { return 1; }
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
  virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
  virtual const char *Name() const { return "MachMerge"; }
#endif
};
class MachBranchNode : public MachIdealNode {
public:
  MachBranchNode() : MachIdealNode() {
    init_class_id(Class_MachBranch);
  }
  virtual void label_set(Label* label, uint block_num) = 0;
  virtual void save_label(Label** label, uint* block_num) = 0;
  virtual MachNode *short_branch_version(Compile* C) { return NULL; }
  virtual bool pinned() const { return true; };
};
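//---------------------------MachNullCheckNode--------------------------------
// An implicit null check: tests the register at input _vidx of the memory
// operation hanging off input 1.  It emits no code of its own; the memory
// op's hardware fault on a null base implements the trap.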
class MachNullCheckNode : public MachBranchNode {
public:
  const uint _vidx;             // Index of memop being tested
  MachNullCheckNode( Node *ctrl, Node *memop, uint vidx ) : MachBranchNode(), _vidx(vidx) {
    init_class_id(Class_MachNullCheck);
    add_req(ctrl);
    add_req(memop);
  }
  virtual uint size_of() const { return sizeof(*this); }
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual void label_set(Label* label, uint block_num);
  virtual void save_label(Label** label, uint* block_num);
  virtual void negate() { }
  virtual const class Type *bottom_type() const { return TypeTuple::IFBOTH; }
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
#ifndef PRODUCT
  virtual const char *Name() const { return "NullCheck"; }
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
#endif
};
class MachProjNode : public ProjNode {
public:
  MachProjNode( Node *multi, uint con, const RegMask &out, uint ideal_reg ) : ProjNode(multi,con), _rout(out), _ideal_reg(ideal_reg) {
    init_class_id(Class_MachProj);
  }
  RegMask _rout;
  const uint  _ideal_reg;
  enum projType {
    unmatched_proj = 0,         // Projs for Control, I/O, memory not matched
    fat_proj       = 999        // Projs killing many regs, defined by _rout
  };
  virtual int   Opcode() const;
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const;
  virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
  virtual const RegMask &out_RegMask() const { return _rout; }
  virtual uint  ideal_reg() const { return _ideal_reg; }
  virtual uint  size_of() const { return sizeof(MachProjNode); }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachIfNode : public MachBranchNode {
  virtual uint size_of() const { return sizeof(*this); } // Size is bigger
public:
  float _prob;                  // Probability branch goes either way
  float _fcnt;                  // Frequency counter
  MachIfNode() : MachBranchNode() {
    init_class_id(Class_MachIf);
  }
  virtual void negate() = 0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachGotoNode : public MachBranchNode {
public:
  MachGotoNode() : MachBranchNode() {
    init_class_id(Class_MachGoto);
  }
};
class MachFastLockNode : public MachNode {
  virtual uint size_of() const { return sizeof(*this); } // Size is bigger
public:
  BiasedLockingCounters*        _counters;
  RTMLockingCounters*       _rtm_counters; // RTM lock counters for inflated locks
  RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks
  MachFastLockNode() : MachNode() {}
};
class MachReturnNode : public MachNode {
  virtual uint size_of() const; // Size is bigger
public:
  RegMask *_in_rms;             // Input register masks, set during allocation
  ReallocMark _nesting;         // assertion check for reallocations
  const TypePtr* _adr_type;     // memory effects of call or return
  MachReturnNode() : MachNode() {
    init_class_id(Class_MachReturn);
    _adr_type = TypePtr::BOTTOM; // the default: all of memory
  }
  void set_adr_type(const TypePtr* atp) { _adr_type = atp; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual bool pinned() const { return true; };
  virtual const TypePtr *adr_type() const;
};
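//---------------------------MachSafePointNode--------------------------------
// Machine-specific version of a safepoint: carries the OopMap for GC and
// the JVMState describing locals, stack and monitors, with _jvmadj
// accounting for the extra machine-level argument edges.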
class MachSafePointNode : public MachReturnNode {
public:
  OopMap*         _oop_map;     // Array of OopMap info (8-bit char) for GC
  JVMState*       _jvms;        // Pointer to list of JVM State Objects
  uint            _jvmadj;      // Extra delta to jvms indexes (mach. args)
  OopMap*         oop_map() const { return _oop_map; }
  void            set_oop_map(OopMap* om) { _oop_map = om; }
  MachSafePointNode() : MachReturnNode(), _oop_map(NULL), _jvms(NULL), _jvmadj(0) {
    init_class_id(Class_MachSafePoint);
  }
  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    _jvms = s;
  }
  virtual const Type    *bottom_type() const;
  virtual const RegMask &in_RegMask(uint) const;
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr); }
  Node *local(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->locoff() + idx);
  }
  Node *stack(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->stkoff() + idx);
  }
  Node *monitor_obj(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->monitor_obj_offset(idx));
  }
  Node *monitor_box(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->monitor_box_offset(idx));
  }
  void  set_local(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(_jvmadj + jvms->locoff() + idx, c);
  }
  void  set_stack(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(_jvmadj + jvms->stkoff() + idx, c);
  }
  void  set_monitor(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(_jvmadj + jvms->monoff() + idx, c);
  }
};
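//---------------------------MachCallNode-------------------------------------
// Machine-specific version of a subroutine call; records the function type,
// entry point address and argument-block size.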
class MachCallNode : public MachSafePointNode {
protected:
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0; // Size is bigger
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of the method being called
  float        _cnt;          // Estimate of number of times called
  uint         _argsize;      // Size of argument block on stack
  const TypeFunc* tf()        const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float   cnt()         const { return _cnt; }
  uint argsize()              const { return _argsize; }
  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }
  void set_argsize(int s)         { _argsize = s; }
  MachCallNode() : MachSafePointNode() {
    init_class_id(Class_MachCall);
  }
  virtual const Type *bottom_type() const;
  virtual bool  pinned() const { return false; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual int ret_addr_offset() { return 0; }
  bool returns_long() const { return tf()->return_type() == T_LONG; }
  bool return_value_is_used() const;
  bool returns_pointer() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachCallJavaNode : public MachCallNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  ciMethod* _method;             // Method being direct called
  int        _bci;               // Byte Code index of call byte code
  bool       _optimized_virtual; // Tells if node is a static call or an optimized virtual
  bool       _method_handle_invoke;   // Tells if the call has to preserve SP
  MachCallJavaNode() : MachCallNode() {
    init_class_id(Class_MachCallJava);
  }
  virtual const RegMask &in_RegMask(uint) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachCallStaticJavaNode : public MachCallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const char *_name;            // Runtime wrapper name
  MachCallStaticJavaNode() : MachCallJavaNode() {
    init_class_id(Class_MachCallStaticJava);
  }
  int uncommon_trap_request() const;
  virtual int ret_addr_offset();
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  void dump_trap_args(outputStream *st) const;
#endif
};
class MachCallDynamicJavaNode : public MachCallJavaNode {
public:
  int _vtable_index;
  MachCallDynamicJavaNode() : MachCallJavaNode() {
    init_class_id(Class_MachCallDynamicJava);
    DEBUG_ONLY(_vtable_index = -99);  // throw an assert if uninitialized
  }
  virtual int ret_addr_offset();
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachCallRuntimeNode : public MachCallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const char *_name;            // Printable name, if _method is NULL
  MachCallRuntimeNode() : MachCallNode() {
    init_class_id(Class_MachCallRuntime);
  }
  virtual int ret_addr_offset();
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
class MachCallLeafNode: public MachCallRuntimeNode {
public:
  MachCallLeafNode() : MachCallRuntimeNode() {
    init_class_id(Class_MachCallLeaf);
  }
};
class MachHaltNode : public MachReturnNode {
public:
  virtual JVMState* jvms() const;
};
class MachTempNode : public MachNode {
private:
  MachOper *_opnd_array[1];
public:
  virtual const RegMask &out_RegMask() const { return *_opnds[0]->in_RegMask(0); }
  virtual uint rule() const { return 9999999; }
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
  MachTempNode(MachOper* oper) {
    init_class_id(Class_MachTemp);
    _num_opnds = 1;
    _opnds = _opnd_array;
    add_req(NULL);
    _opnds[0] = oper;
  }
  virtual uint size_of() const { return sizeof(MachTempNode); }
#ifndef PRODUCT
  virtual void format(PhaseRegAlloc *, outputStream *st ) const {}
  virtual const char *Name() const { return "MachTemp";}
#endif
};
class labelOper : public MachOper {
private:
  virtual uint           num_edges() const { return 0; }
public:
  Label* _label;                // Label for branch(es)
  uint _block_num;
  labelOper() : _label(0), _block_num(0) {}
  labelOper(Label* label, uint block_num) : _label(label), _block_num(block_num) {}
  labelOper(labelOper* l) : _label(l->_label) , _block_num(l->_block_num) {}
  virtual MachOper *clone(Compile* C) const;
  virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; }
  virtual uint           opcode() const;
  virtual uint           hash()   const;
  virtual uint           cmp( const MachOper &oper ) const;
#ifndef PRODUCT
  virtual const char    *Name()   const { return "Label";}
  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;
  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const { int_format( ra, node, st ); }
#endif
};
class methodOper : public MachOper {
private:
  virtual uint           num_edges() const { return 0; }
public:
  intptr_t _method;             // Address of method
  methodOper() :   _method(0) {}
  methodOper(intptr_t method) : _method(method)  {}
  virtual MachOper *clone(Compile* C) const;
  virtual intptr_t method() const { return _method; }
  virtual uint           opcode() const;
  virtual uint           hash()   const;
  virtual uint           cmp( const MachOper &oper ) const;
#ifndef PRODUCT
  virtual const char    *Name()   const { return "Method";}
  virtual void int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const;
  virtual void ext_format(PhaseRegAlloc *ra, const MachNode *node, int idx, outputStream *st) const { int_format( ra, node, st ); }
#endif
};
#endif // SHARE_VM_OPTO_MACHNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/macro.cpp
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/sharedRuntime.hpp"
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}
void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  uint old_dbg_start = oldcall->tf()->domain()->cnt();
  uint new_dbg_start = newcall->tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == newcall->req(), "argument count mismatch");
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    Node* old_in = oldcall->in(i);
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      uint old_unique = C->unique();
      Node* new_in = old_sosn->clone(sosn_map);
      if (old_unique != C->unique()) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = transform_later(new_in); // Register new node.
      }
      old_in = new_in;
    }
    newcall->add_req(old_in);
  }
  newcall->set_jvms(oldcall->jvms());
  for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(newcall);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
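//------------------------------opt_bits_test---------------------------------
// Emit the test "(word & mask) != bits": AND the word with the mask,
// compare against the bits, and branch.  One arm is wired into the given
// region edge and the other is returned, as selected by return_fast_path.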
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
  Node* cmp;
  if (mask != 0) {
    Node* and_node = transform_later(new (C) AndXNode(word, MakeConX(mask)));
    cmp = transform_later(new (C) CmpXNode(and_node, MakeConX(bits)));
  } else {
    cmp = word;
  }
  Node* bol = transform_later(new (C) BoolNode(cmp, BoolTest::ne));
  IfNode* iff = new (C) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  transform_later(iff);
  Node *fast_taken = transform_later( new (C) IfFalseNode(iff) );
  Node *slow_taken = transform_later( new (C) IfTrueNode(iff) );
  if (return_fast_path) {
    region->init_req(edge, slow_taken); // Capture slow-control
    return fast_taken;
  } else {
    region->init_req(edge, fast_taken); // Capture fast-control
    return slow_taken;
  }
}
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {
 CallNode *call = leaf_name
   ? (CallNode*)new (C) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
   : (CallNode*)new (C) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  copy_call_debug_info(oldcall, call);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.replace_node(oldcall, call);
  transform_later(call);
  return call;
}
void PhaseMacroExpand::extract_call_projections(CallNode *call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }
}
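//------------------------------eliminate_card_mark---------------------------
// Remove the card-mark code hanging off an eliminated allocation's CastP2X
// user: without G1 the card-table stores are cut out of the memory chain;
// with G1 the post-barrier's region-crossing test is folded away by forcing
// its compare to EQ.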
void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
  assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
  if (!UseG1GC) {
    Node *shift = p2x->unique_out();
    Node *addp = shift->unique_out();
    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
      Node *mem = addp->last_out(j);
      if (UseCondCardMark && mem->is_Load()) {
        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
        _igvn.replace_node(mem, intcon(0));
        continue;
      }
      assert(mem->is_Store(), "store required");
      _igvn.replace_node(mem, mem->in(MemNode::Memory));
    }
  } else {
    assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
    Node* this_region = p2x->in(0);
    assert(this_region != NULL, "");
    Node* xorx = NULL;
    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
      Node* u = p2x->fast_out(i);
      if (u->Opcode() == Op_XorX) {
        xorx = u;
        break;
      }
    }
    assert(xorx != NULL, "missing G1 post barrier");
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
    cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
    "missing region check in G1 post barrier");
    _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse()) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
                                              PtrQueue::byte_offset_of_active());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
            _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
    assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, "");
    _igvn.replace_node(p2x, top());
  }
}
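//------------------------------scan_mem_chain--------------------------------
// Walk the memory chain backwards from mem, looking for the store that
// defines the given field (alias_idx/offset) of the scalar-replaceable
// allocation, or for one of the sentinels (start memory, allocation memory)
// meaning the field still holds its zero default.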
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
  Node *orig_mem = mem;
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr();
  while (true) {
    if (mem == alloc_mem || mem == start_mem ) {
      return mem;  // hit one of our sentinels
    } else if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(alias_idx);
    } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) {
      Node *in = mem->in(0);
      if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) {
        return in;
      } else if (in->is_Call()) {
        CallNode *call = in->as_Call();
        if (!call->may_modify(tinst, phase)) {
          mem = call->in(TypeFunc::Memory);
        }
        mem = in->in(TypeFunc::Memory);
      } else if (in->is_MemBar()) {
        mem = in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (mem->is_Store()) {
      const TypePtr* atype = mem->as_Store()->adr_type();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(atype->isa_oopptr(), "address type must be oopptr");
        int adr_offset = atype->offset();
        uint adr_iid = atype->is_oopptr()->instance_id();
        if (adr_offset == offset && adr_iid == alloc->_idx)
          return mem;
      } else {
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
    } else if (mem->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
        debug_only(intptr_t offset;)
        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
        InitializeNode* init = alloc->as_Allocate()->initialization();
        if (init != NULL)
          return init;
        else
          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
      }
    } else if (mem->Opcode() == Op_SCMemProj) {
      mem = mem->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray, "sanity");
        adr = mem->in(3); // Destination array
      }
      const TypePtr* atype = adr->bottom_type()->is_ptr();
      int adr_idx = Compile::current()->get_alias_index(atype);
      if (adr_idx == alias_idx) {
        assert(false, "Object is not scalar replaceable if a LoadStore node access its field");
        return NULL;
      }
      mem = mem->in(MemNode::Memory);
    } else {
      return mem;
    }
    assert(mem != orig_mem, "dead memory loop");
  }
}
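//------------------------------value_from_mem_phi----------------------------
// Recursively build a value Phi mirroring a memory Phi: for each memory
// input, find the value stored to the field on that path.  Returns NULL if
// a value cannot be determined or the Phi tree exceeds 'level' levels.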
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) {
  assert(mem->is_Phi(), "sanity");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  int instance_id = adr_t->instance_id();
  Node* region = mem->in(0);
  for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
    Node* phi = region->fast_out(k);
    if (phi->is_Phi() && phi != mem &&
        phi->as_Phi()->is_same_inst_field(phi_type, (int)mem->_idx, instance_id, alias_idx, offset)) {
      return phi;
    }
  }
  Node* new_phi = value_phis->find(mem->_idx);
  if (new_phi != NULL)
    return new_phi;
  if (level <= 0) {
    return NULL; // Give up: phi tree too deep
  }
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  uint length = mem->req();
  GrowableArray <Node *> values(length, length, NULL, false);
  PhiNode *phi = new (C) PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
  transform_later(phi);
  value_phis->push(phi, mem->_idx);
  for (uint j = 1; j < length; j++) {
    Node *in = mem->in(j);
    if (in == NULL || in->is_top()) {
      values.at_put(j, in);
    } else  {
      Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
      if (val == start_mem || val == alloc_mem) {
        values.at_put(j, _igvn.zerocon(ft));
        continue;
      }
      if (val->is_Initialize()) {
        val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      }
      if (val == NULL) {
        return NULL;  // can't find a value on this path
      }
      if (val == mem) {
        values.at_put(j, mem);
      } else if (val->is_Store()) {
        values.at_put(j, val->in(MemNode::ValueIn));
      } else if(val->is_Proj() && val->in(0) == alloc) {
        values.at_put(j, _igvn.zerocon(ft));
      } else if (val->is_Phi()) {
        val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
        if (val == NULL) {
          return NULL;
        }
        values.at_put(j, val);
      } else if (val->Opcode() == Op_SCMemProj) {
        assert(val->in(0)->is_LoadStore() || val->in(0)->Opcode() == Op_EncodeISOArray, "sanity");
        assert(false, "Object is not scalar replaceable if a LoadStore node access its field");
        return NULL;
      } else {
#ifdef ASSERT
        val->dump();
        assert(false, "unknown node on this path");
#endif
        return NULL;  // unknown node on this path
      }
    }
  }
  for (uint j = 1; j < length; j++) {
    if (values.at(j) == mem) {
      phi->init_req(j, phi);
    } else {
      phi->init_req(j, values.at(j));
    }
  }
  return phi;
}
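//------------------------------value_from_mem--------------------------------
// Find the value stored to a known-instance field as seen by the given
// safepoint's memory state; used to populate debug info when the allocation
// is scalar-replaced.  Returns NULL if no value can be found.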
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) {
  assert(adr_t->is_known_instance_field(), "instance required");
  int instance_id = adr_t->instance_id();
  assert((uint)instance_id == alloc->_idx, "wrong allocation");
  int alias_idx = C->get_alias_index(adr_t);
  int offset = adr_t->offset();
  Node *start_mem = C->start()->proj_out(TypeFunc::Memory);
  Node *alloc_ctrl = alloc->in(TypeFunc::Control);
  Node *alloc_mem = alloc->in(TypeFunc::Memory);
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  bool done = sfpt_mem == alloc_mem;
  Node *mem = sfpt_mem;
  while (!done) {
    if (visited.test_set(mem->_idx)) {
      return NULL;  // found a loop, give up
    }
    mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn);
    if (mem == start_mem || mem == alloc_mem) {
      done = true;  // hit a sentinel, return appropriate 0 value
    } else if (mem->is_Initialize()) {
      mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
      if (mem == NULL) {
        done = true; // Something went wrong.
      } else if (mem->is_Store()) {
        const TypePtr* atype = mem->as_Store()->adr_type();
        assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
        done = true;
      }
    } else if (mem->is_Store()) {
      const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
      assert(atype != NULL, "address type must be oopptr");
      assert(C->get_alias_index(atype) == alias_idx &&
             atype->is_known_instance_field() && atype->offset() == offset &&
             atype->instance_id() == instance_id, "store is correct memory slice");
      done = true;
    } else if (mem->is_Phi()) {
      Node *unique_input = NULL;
      Node *top = C->top();
      for (uint i = 1; i < mem->req(); i++) {
        Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn);
        if (n == NULL || n == top || n == mem) {
          continue;
        } else if (unique_input == NULL) {
          unique_input = n;
        } else if (unique_input != n) {
          unique_input = top;
          break;
        }
      }
      if (unique_input != NULL && unique_input != top) {
        mem = unique_input;
      } else {
        done = true;
      }
    } else {
      assert(false, "unexpected node");
    }
  }
  if (mem != NULL) {
    if (mem == start_mem || mem == alloc_mem) {
      return _igvn.zerocon(ft);
    } else if (mem->is_Store()) {
      return mem->in(MemNode::ValueIn);
    } else if (mem->is_Phi()) {
      Node_Stack value_phis(a, 8);
      Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit);
      if (phi != NULL) {
        return phi;
      } else {
        while(value_phis.is_nonempty()) {
          Node* n = value_phis.node();
          _igvn.replace_node(n, C->top());
          value_phis.pop();
        }
      }
    }
  }
  return NULL;
}
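// Check whether the allocation can be eliminated: every use of its
// result must be a field access (AddP with a known offset, used only by
// Stores or a CastP2X card mark), a card-mark CastP2X, or a debug-only
// use at a safepoint.  Safepoints that will need a scalar-replaced
// description of the object are collected in 'safepoints'.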
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  NOT_PRODUCT( const char* fail_eliminate = NULL; )
  DEBUG_ONLY( Node* disq_node = NULL; )
  bool  can_eliminate = true;
  Node* res = alloc->result_cast();
  const TypeOopPtr* res_type = NULL;
  if (res == NULL) {
  } else if (!res->is_CheckCastPP()) {
    NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
    can_eliminate = false;
  } else {
    res_type = _igvn.type(res)->isa_oopptr();
    if (res_type == NULL) {
      NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";)
      can_eliminate = false;
    } else if (res_type->isa_aryptr()) {
      int length = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      if (length < 0) {
        NOT_PRODUCT(fail_eliminate = "Array's size is not constant";)
        can_eliminate = false;
      }
    }
  }
  if (can_eliminate && res != NULL) {
    for (DUIterator_Fast jmax, j = res->fast_outs(jmax);
                               j < jmax && can_eliminate; j++) {
      Node* use = res->fast_out(j);
      if (use->is_AddP()) {
        const TypePtr* addp_type = _igvn.type(use)->is_ptr();
        int offset = addp_type->offset();
        if (offset == Type::OffsetTop || offset == Type::OffsetBot) {
          NOT_PRODUCT(fail_eliminate = "Undefined field referrence";)
          can_eliminate = false;
          break;
        }
        for (DUIterator_Fast kmax, k = use->fast_outs(kmax);
                                   k < kmax && can_eliminate; k++) {
          Node* n = use->fast_out(k);
          if (!n->is_Store() && n->Opcode() != Op_CastP2X) {
            DEBUG_ONLY(disq_node = n;)
            if (n->is_Load() || n->is_LoadStore()) {
              NOT_PRODUCT(fail_eliminate = "Field load";)
            } else {
              NOT_PRODUCT(fail_eliminate = "Not store field referrence";)
            }
            can_eliminate = false;
          }
        }
      } else if (use->is_SafePoint()) {
        SafePointNode* sfpt = use->as_SafePoint();
        if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "Object is passed as argument";)
          can_eliminate = false;
        }
        Node* sfptMem = sfpt->memory();
        if (sfptMem == NULL || sfptMem->is_top()) {
          DEBUG_ONLY(disq_node = use;)
          NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
          can_eliminate = false;
        } else {
          safepoints.append_if_missing(sfpt);
        }
      } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
        if (use->is_Phi()) {
          if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";)
          }
          DEBUG_ONLY(disq_node = use;)
        } else {
          if (use->Opcode() == Op_Return) {
            NOT_PRODUCT(fail_eliminate = "Object is return value";)
          } else {
            NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
          }
          DEBUG_ONLY(disq_node = use;)
        }
        can_eliminate = false;
      }
    }
  }
#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (can_eliminate) {
      tty->print("Scalar ");
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
    } else if (alloc->_is_scalar_replaceable) {
      tty->print("NotScalar (%s)", fail_eliminate);
      if (res == NULL)
        alloc->dump();
      else
        res->dump();
#ifdef ASSERT
      if (disq_node != NULL) {
          tty->print("  >>>> ");
          disq_node->dump();
      }
#endif /*ASSERT*/
    }
  }
#endif
  return can_eliminate;
}
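// Do scalar replacement: for each collected safepoint, materialize the
// field values with value_from_mem() and describe the object through a
// SafePointScalarObjectNode.  If a field value cannot be found, the
// edges already added to processed safepoints are rolled back and
// false is returned.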
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
  GrowableArray <SafePointNode *> safepoints_done;
  ciKlass* klass = NULL;
  ciInstanceKlass* iklass = NULL;
  int nfields = 0;
  int array_base = 0;
  int element_size = 0;
  BasicType basic_elem_type = T_ILLEGAL;
  ciType* elem_type = NULL;
  Node* res = alloc->result_cast();
  assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
  const TypeOopPtr* res_type = NULL;
  if (res != NULL) { // Could be NULL when there are no users
    res_type = _igvn.type(res)->isa_oopptr();
  }
  if (res != NULL) {
    klass = res_type->klass();
    if (res_type->isa_instptr()) {
      assert(klass->is_instance_klass(), "must be an instance klass.");
      iklass = klass->as_instance_klass();
      nfields = iklass->nof_nonstatic_fields();
    } else {
      nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
      assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
      elem_type = klass->as_array_klass()->element_type();
      basic_elem_type = elem_type->basic_type();
      array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
      element_size = type2aelembytes(basic_elem_type);
    }
  }
  while (safepoints.length() > 0) {
    SafePointNode* sfpt = safepoints.pop();
    Node* mem = sfpt->memory();
    assert(sfpt->jvms() != NULL, "missed JVMS");
    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
    SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
#ifdef ASSERT
                                                 alloc,
#endif
                                                 first_ind, nfields);
    sobj->init_req(0, C->root());
    transform_later(sobj);
    for (int j = 0; j < nfields; j++) {
      intptr_t offset;
      ciField* field = NULL;
      if (iklass != NULL) {
        field = iklass->nonstatic_field_at(j);
        offset = field->offset();
        elem_type = field->type();
        basic_elem_type = field->layout_type();
      } else {
        offset = array_base + j * (intptr_t)element_size;
      }
      const Type *field_type;
      if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
        if (!elem_type->is_loaded()) {
          field_type = TypeInstPtr::BOTTOM;
        } else if (field != NULL && field->is_constant() && field->is_static()) {
          ciObject* con = field->constant_value().as_object();
          field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
          assert(field_type != NULL, "field singleton type must be consistent");
        } else {
          field_type = TypeOopPtr::make_from_klass(elem_type->as_klass());
        }
        if (UseCompressedOops) {
          field_type = field_type->make_narrowoop();
          basic_elem_type = T_NARROWOOP;
        }
      } else {
        field_type = Type::get_const_basic_type(basic_elem_type);
      }
      const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
      Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc);
      if (field_val == NULL) {
        uint last = sfpt->req() - 1;
        for (int k = 0;  k < j; k++) {
          sfpt->del_req(last--);
        }
        while (safepoints_done.length() > 0) {
          SafePointNode* sfpt_done = safepoints_done.pop();
          last = sfpt_done->req() - 1;
          for (int k = 0;  k < nfields; k++) {
            sfpt_done->del_req(last--);
          }
          JVMState *jvms = sfpt_done->jvms();
          jvms->set_endoff(sfpt_done->req());
          int start = jvms->debug_start();
          int end   = jvms->debug_end();
          for (int i = start; i < end; i++) {
            if (sfpt_done->in(i)->is_SafePointScalarObject()) {
              SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
              if (scobj->first_index(jvms) == sfpt_done->req() &&
                  scobj->n_fields() == (uint)nfields) {
                assert(scobj->alloc() == alloc, "sanity");
                sfpt_done->set_req(i, res);
              }
            }
          }
        }
#ifndef PRODUCT
        if (PrintEliminateAllocations) {
          if (field != NULL) {
            tty->print("=== At SafePoint node %d can't find value of Field: ",
                       sfpt->_idx);
            field->print();
            int field_idx = C->get_alias_index(field_addr_type);
            tty->print(" (alias_idx=%d)", field_idx);
          } else { // Array's element
            tty->print("=== At SafePoint node %d can't find value of array element [%d]",
                       sfpt->_idx, j);
          }
          tty->print(", which prevents elimination of: ");
          if (res == NULL)
            alloc->dump();
          else
            res->dump();
        }
#endif
        return false;
      }
      if (UseCompressedOops && field_type->isa_narrowoop()) {
        if (field_val->is_EncodeP()) {
          field_val = field_val->in(1);
        } else {
          field_val = transform_later(new (C) DecodeNNode(field_val, field_val->get_ptr_type()));
        }
      }
      sfpt->add_req(field_val);
    }
    JVMState *jvms = sfpt->jvms();
    jvms->set_endoff(sfpt->req());
    int start = jvms->debug_start();
    int end   = jvms->debug_end();
    sfpt->replace_edges_in_range(res, sobj, start, end);
    safepoints_done.append_if_missing(sfpt); // keep it for rollback
  }
  return true;
}
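// Disconnect all uses of an eliminated allocation (or boxing call):
// field stores, card marks, the Initialize node, and the various call
// projections are replaced by the allocation's incoming state.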
void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
  Node* res = alloc->result_cast();
  if (res != NULL) {
    for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
      Node *use = res->last_out(j);
      uint oc1 = res->outcnt();
      if (use->is_AddP()) {
        for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
#ifdef ASSERT
            for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
                                       p < pmax; p++) {
              Node* mb = n->fast_out(p);
              assert(mb->is_Initialize() || !mb->is_MemBar() ||
                     mb->req() <= MemBarNode::Precedent ||
                     mb->in(MemBarNode::Precedent) != n,
                     "MemBarVolatile should be eliminated for non-escaping object");
            }
#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_card_mark(n);
          }
          k -= (oc2 - use->outcnt());
        }
      } else {
        eliminate_card_mark(use);
      }
      j -= (oc1 - res->outcnt());
    }
    assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
    _igvn.remove_dead_node(res);
  }
  if (_resproj != NULL && _resproj->outcnt() != 0) {
    for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax);  j < jmax; j++) {
      Node *use = _resproj->fast_out(j);
      if (use->is_AddP()) {
        _igvn.replace_node(use, C->top());
        --j; --jmax;
      }
    }
    for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
      Node *use = _resproj->last_out(j);
      uint oc1 = _resproj->outcnt();
      if (use->is_Initialize()) {
        InitializeNode *init = use->as_Initialize();
        assert(init->outcnt() <= 2, "only a control and memory projection expected");
        Node *ctrl_proj = init->proj_out(TypeFunc::Control);
        if (ctrl_proj != NULL) {
          assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection");
          _igvn.replace_node(ctrl_proj, _fallthroughcatchproj);
        }
        Node *mem_proj = init->proj_out(TypeFunc::Memory);
        if (mem_proj != NULL) {
          Node *mem = init->in(TypeFunc::Memory);
#ifdef ASSERT
          if (mem->is_MergeMem()) {
            assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
          } else {
            assert(mem == _memproj_fallthrough, "allocation memory projection");
          }
#endif
          _igvn.replace_node(mem_proj, mem);
        }
      } else  {
        assert(false, "only Initialize or AddP expected");
      }
      j -= (oc1 - _resproj->outcnt());
    }
  }
  if (_fallthroughcatchproj != NULL) {
    _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
  }
  if (_memproj_fallthrough != NULL) {
    _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
  }
  if (_memproj_catchall != NULL) {
    _igvn.replace_node(_memproj_catchall, C->top());
  }
  if (_ioproj_fallthrough != NULL) {
    _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
  }
  if (_ioproj_catchall != NULL) {
    _igvn.replace_node(_ioproj_catchall, C->top());
  }
  if (_catchallcatchproj != NULL) {
    _igvn.replace_node(_catchallcatchproj, C->top());
  }
}
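// Eliminate a non-escaping allocation if scalar replacement is
// possible.  Skipped when JVMTI can pop frames: reallocation during
// deoptimization does not play well with JVMTI PopFrame.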
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
  if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
    return false;
  }
  Node* klass = alloc->in(AllocateNode::KlassNode);
  const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
  Node* res = alloc->result_cast();
  bool boxing_alloc = C->eliminate_boxing() &&
                      tklass->klass()->is_instance_klass()  &&
                      tklass->klass()->as_instance_klass()->is_box_klass();
  if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
    return false;
  }
  extract_call_projections(alloc);
  GrowableArray <SafePointNode *> safepoints;
  if (!can_eliminate_allocation(alloc, safepoints)) {
    return false;
  }
  if (!alloc->_is_scalar_replaceable) {
    assert(res == NULL, "sanity");
    if (safepoints.length() > 0) {
      return false;
    }
  }
  if (!scalar_replacement(alloc, safepoints)) {
    return false;
  }
  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("eliminate_allocation type='%d'",
              log->identify(tklass->klass()));
    JVMState* p = alloc->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_allocation");
  }
  process_users_of_allocation(alloc);
#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    if (alloc->is_AllocateArray())
      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
    else
      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
  }
#endif
  return true;
}
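// Eliminate an autobox call whose result projection is already gone,
// i.e. only debug uses at safepoints remain to be scalar-replaced.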
bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {
  if (!C->eliminate_boxing() || boxing->proj_out(TypeFunc::Parms) != NULL) {
    return false;
  }
  assert(boxing->result_cast() == NULL, "unexpected boxing node result");
  extract_call_projections(boxing);
  const TypeTuple* r = boxing->tf()->range();
  assert(r->cnt() > TypeFunc::Parms, "sanity");
  const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
  assert(t != NULL, "sanity");
  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("eliminate_boxing type='%d'",
              log->identify(t->klass()));
    JVMState* p = boxing->jvms();
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("eliminate_boxing");
  }
  process_users_of_allocation(boxing);
#ifndef PRODUCT
  if (PrintEliminateAllocations) {
    tty->print("++++ Eliminated: %d ", boxing->_idx);
    boxing->method()->print_short_name(tty);
    tty->cr();
  }
#endif
  return true;
}
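// Compute the addresses of the allocation top and end pointers:
// thread-local (TLAB) or the shared eden of the heap.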
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
  if (UseTLAB) {                // Private allocation: load from TLS
    Node* thread = transform_later(new (C) ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                      // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}
Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
  transform_later(value);
  return value;
}
Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt, MemNode::unordered);
  transform_later(mem);
  return mem;
}
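// Expand an AllocateNode into a fast inline allocation path (TLAB bump
// or CAS on the shared eden top), guarded by the InitialTest input and
// a heap-end check, plus a slow path calling into the runtime.  Fast
// and slow results are merged in result_region/result_phi_*.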
void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc, // allocation node to be expanded
            Node* length,  // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address  // Address of slow call
    )
{
  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);
  Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
  Node* klass_node        = alloc->in(AllocateNode::KlassNode);
  Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
  assert(ctrl != NULL, "must have control");
  enum { slow_result_path = 1, fast_result_path = 2 };
  Node *result_region = NULL;
  Node *result_phi_rawmem = NULL;
  Node *result_phi_rawoop = NULL;
  Node *result_phi_i_o = NULL;
  bool always_slow = false;
  int tv = _igvn.find_int_con(initial_slow_test, -1);
  if (tv >= 0) {
    always_slow = (tv == 1);
    initial_slow_test = NULL;
  } else {
    initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
  }
  if (C->env()->dtrace_alloc_probes() ||
      (!UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
                    (UseConcMarkSweepGC && CMSIncrementalMode)))) {
    always_slow = true;
    initial_slow_test = NULL;
  }
  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = NULL;
  Node *toobig_false = ctrl;
  assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
  if (initial_slow_test != NULL ) {
    slow_region = new (C) RegionNode(3);
    IfNode *toobig_iff = new (C) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    Node *toobig_true = new (C) IfTrueNode( toobig_iff );
    transform_later(toobig_true);
    slow_region    ->init_req( too_big_or_final_path, toobig_true );
    toobig_false = new (C) IfFalseNode( toobig_iff );
    transform_later(toobig_false);
  } else {         // No initial test, just fall into next case
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }
  Node *slow_mem = mem;  // save the current memory state for slow path
  if (!always_slow) {
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
    }
    Node* eden_top_adr;
    Node* eden_end_adr;
    set_eden_pointers(eden_top_adr, eden_end_adr);
    Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
    result_region = new (C) RegionNode(3);
    result_phi_rawmem = new (C) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
    result_phi_rawoop = new (C) PhiNode(result_region, TypeRawPtr::BOTTOM);
    result_phi_i_o    = new (C) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
    enum { fall_in_path = 1, contended_loopback_path = 2 };
    Node *contended_region;
    Node *contended_phi_rawmem;
    if (UseTLAB) {
      contended_region = toobig_false;
      contended_phi_rawmem = mem;
    } else {
      contended_region = new (C) RegionNode(3);
      contended_phi_rawmem = new (C) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
      contended_region    ->init_req(fall_in_path, toobig_false);
      contended_phi_rawmem->init_req(fall_in_path, mem);
      transform_later(contended_region);
      transform_later(contended_phi_rawmem);
    }
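    // Load the current eden top: a plain load from the TLAB, or a
    // load-locked that anchors the CAS loop on the shared eden.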
    Node *old_eden_top = UseTLAB
      ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
      : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
    transform_later(old_eden_top);
    Node *new_eden_top = new (C) AddPNode(top(), old_eden_top, size_in_bytes);
    transform_later(new_eden_top);
    Node *needgc_cmp = new (C) CmpPNode(new_eden_top, eden_end);
    transform_later(needgc_cmp);
    Node *needgc_bol = new (C) BoolNode(needgc_cmp, BoolTest::ge);
    transform_later(needgc_bol);
    IfNode *needgc_iff = new (C) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
    transform_later(needgc_iff);
    Node *needgc_true = new (C) IfTrueNode(needgc_iff);
    transform_later(needgc_true);
    if (initial_slow_test) {
      slow_region->init_req(need_gc_path, needgc_true);
      transform_later(slow_region);
    } else {                      // No initial slow path needed!
      slow_region = needgc_true;
    }
    Node *needgc_false = new (C) IfFalseNode(needgc_iff);
    transform_later(needgc_false);
    result_phi_i_o->init_req(slow_result_path, i_o);
    i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
                              old_eden_top, new_eden_top, length);
    Node* fast_oop = old_eden_top;
    Node* fast_oop_ctrl;
    Node* fast_oop_rawmem;
    if (UseTLAB) {
      Node* store_eden_top =
        new (C) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
                              TypeRawPtr::BOTTOM, new_eden_top, MemNode::unordered);
      transform_later(store_eden_top);
      fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
      fast_oop_rawmem = store_eden_top;
    } else {
      Node* store_eden_top =
        new (C) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr,
                                         new_eden_top, fast_oop/*old_eden_top*/);
      transform_later(store_eden_top);
      Node *contention_check = new (C) BoolNode(store_eden_top, BoolTest::ne);
      transform_later(contention_check);
      store_eden_top = new (C) SCMemProjNode(store_eden_top);
      transform_later(store_eden_top);
      IfNode *contention_iff = new (C) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
      transform_later(contention_iff);
      Node *contention_true = new (C) IfTrueNode(contention_iff);
      transform_later(contention_true);
      contended_region->init_req(contended_loopback_path, contention_true);
      contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top);
      Node *contention_false = new (C) IfFalseNode(contention_iff);
      transform_later(contention_false);
      fast_oop_ctrl = contention_false;
      Node* thread = new (C) ThreadLocalNode();
      transform_later(thread);
      Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread,
                                             in_bytes(JavaThread::allocated_bytes_offset()));
      Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
                                    0, TypeLong::LONG, T_LONG);
#ifdef _LP64
      Node* alloc_size = size_in_bytes;
#else
      Node* alloc_size = new (C) ConvI2LNode(size_in_bytes);
      transform_later(alloc_size);
#endif
      Node* new_alloc_bytes = new (C) AddLNode(alloc_bytes, alloc_size);
      transform_later(new_alloc_bytes);
      fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr,
                                   0, new_alloc_bytes, T_LONG);
    }
    InitializeNode* init = alloc->initialization();
    fast_oop_rawmem = initialize_object(alloc,
                                        fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                        klass_node, length, size_in_bytes);
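    // If the initialization is not known to be complete before the
    // object can be observed by another thread, emit a MemBarStoreStore
    // so no thread sees a partially initialized object.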
#ifndef AARCH64
    if (init == NULL || (!init->is_complete_with_arraycopy() && !init->does_not_escape())) {
#else
    if (!alloc->does_not_escape_thread() &&
        (init == NULL || !init->is_complete_with_arraycopy())) {
#endif
      if (init == NULL || init->req() < InitializeNode::RawStores) {
        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
        transform_later(mb);
        mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
        mb->init_req(TypeFunc::Control, fast_oop_ctrl);
        fast_oop_ctrl = new (C) ProjNode(mb,TypeFunc::Control);
        transform_later(fast_oop_ctrl);
        fast_oop_rawmem = new (C) ProjNode(mb,TypeFunc::Memory);
        transform_later(fast_oop_rawmem);
      } else {
        Node* init_ctrl = init->proj_out(TypeFunc::Control);
        Node* init_mem = init->proj_out(TypeFunc::Memory);
        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
        transform_later(mb);
        Node* ctrl = new (C) ProjNode(init,TypeFunc::Control);
        transform_later(ctrl);
        Node* mem = new (C) ProjNode(init,TypeFunc::Memory);
        transform_later(mem);
        mb->init_req(TypeFunc::Memory, mem);
        mb->init_req(TypeFunc::Control, ctrl);
        ctrl = new (C) ProjNode(mb,TypeFunc::Control);
        transform_later(ctrl);
        mem = new (C) ProjNode(mb,TypeFunc::Memory);
        transform_later(mem);
        _igvn.replace_node(init_ctrl, ctrl);
        _igvn.replace_node(init_mem, mem);
      }
    }
    if (C->env()->dtrace_extended_probes()) {
      int size = TypeFunc::Parms + 2;
      CallLeafNode *call = new (C) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                "dtrace_object_alloc",
                                                TypeRawPtr::BOTTOM);
      Node* thread = new (C) ThreadLocalNode();
      transform_later(thread);
      call->init_req(TypeFunc::Parms+0, thread);
      call->init_req(TypeFunc::Parms+1, fast_oop);
      call->init_req(TypeFunc::Control, fast_oop_ctrl);
      call->init_req(TypeFunc::I_O    , top()); // does no i/o
      call->init_req(TypeFunc::Memory , fast_oop_rawmem);
      call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
      call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
      transform_later(call);
      fast_oop_ctrl = new (C) ProjNode(call,TypeFunc::Control);
      transform_later(fast_oop_ctrl);
      fast_oop_rawmem = new (C) ProjNode(call,TypeFunc::Memory);
      transform_later(fast_oop_rawmem);
    }
    result_region    ->init_req(fast_result_path, fast_oop_ctrl);
    result_phi_rawoop->init_req(fast_result_path, fast_oop);
    result_phi_i_o   ->init_req(fast_result_path, i_o);
    result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
  } else {
    slow_region = ctrl;
    result_phi_i_o = i_o; // Rename it to use in the following code.
  }
  CallNode *call = new (C) CallStaticJavaNode(slow_call_type, slow_call_address,
                               OptoRuntime::stub_name(slow_call_address),
                               alloc->jvms()->bci(),
                               TypePtr::BOTTOM);
  call->init_req( TypeFunc::Control, slow_region );
  call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
  call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
  call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
  call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != NULL) {
    call->init_req(TypeFunc::Parms+1, length);
  }
  copy_call_debug_info((CallNode *) alloc,  call);
  if (!always_slow) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  } else {
    call->set_req(TypeFunc::I_O, result_phi_i_o);
  }
  _igvn.replace_node(alloc, call);
  transform_later(call);
  extract_call_projections(call);
  if (!always_slow && _memproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_fallthrough->fast_out(i);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
      --i;
    }
  }
  if (_memproj_catchall != NULL ) {
    if (_memproj_fallthrough == NULL) {
      _memproj_fallthrough = new (C) ProjNode(call, TypeFunc::Memory);
      transform_later(_memproj_fallthrough);
    }
    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_catchall->fast_out(i);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
      --i;
    }
    assert(_memproj_catchall->outcnt() == 0, "all uses must be deleted");
    _igvn.remove_dead_node(_memproj_catchall);
  }
  if (_ioproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_fallthrough->fast_out(i);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
      --i;
    }
  }
  if (_ioproj_catchall != NULL ) {
    if (_ioproj_fallthrough == NULL) {
      _ioproj_fallthrough = new (C) ProjNode(call, TypeFunc::I_O);
      transform_later(_ioproj_fallthrough);
    }
    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_catchall->fast_out(i);
      _igvn.rehash_node_delayed(use);
      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
      --i;
    }
    assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted");
    _igvn.remove_dead_node(_ioproj_catchall);
  }
  if (always_slow) {
    if (result_phi_i_o->outcnt() > 1) {
      call->set_req(TypeFunc::I_O, top());
    } else {
      assert(result_phi_i_o->unique_ctrl_out() == call, "");
    }
    return;
  }
  if (_fallthroughcatchproj != NULL) {
    ctrl = _fallthroughcatchproj->clone();
    transform_later(ctrl);
    _igvn.replace_node(_fallthroughcatchproj, result_region);
  } else {
    ctrl = top();
  }
  Node *slow_result;
  if (_resproj == NULL) {
    slow_result = top();
  } else {
    slow_result = _resproj->clone();
    transform_later(slow_result);
    _igvn.replace_node(_resproj, result_phi_rawoop);
  }
  result_region    ->init_req( slow_result_path, ctrl );
  result_phi_rawoop->init_req( slow_result_path, slow_result);
  result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
  transform_later(result_region);
  transform_later(result_phi_rawoop);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
}
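// Emit the object header stores (mark word, klass, and array length)
// and, unless an InitializeNode captures the stores or ZeroTLAB makes
// it unnecessary, the zeroing of the object body.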
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
  InitializeNode* init = alloc->initialization();
  Node* mark_node = NULL;
  if (UseBiasedLocking && (length == NULL)) {
    mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
  } else {
    mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
  }
  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
  rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
  int header_size = alloc->minimum_header_size();  // conservatively small
  if (length != NULL) {         // Arrays need length field
    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
    header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
    if (k->is_array_klass())    // we know the exact header size in most cases:
      header_size = Klass::layout_helper_header_size(k->layout_helper());
  }
  if (init == NULL) {
    if (!ZeroTLAB)
      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                            header_size, size_in_bytes,
                                            &_igvn);
  } else {
    if (!init->is_complete()) {
      rawmem = init->complete_stores(control, rawmem, object,
                                     header_size, size_in_bytes, &_igvn);
    }
    init->set_req(InitializeNode::RawAddress, top());
  }
  return rawmem;
}
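// Emit prefetches for the next allocation, depending on
// AllocatePrefetchStyle: style 2 maintains a TLAB prefetch watermark,
// style 3 prefetches from a cache-line-aligned address derived from the
// old eden top, and any other positive style prefetches lines past the
// new eden top.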
Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
                                        Node*& contended_phi_rawmem,
                                        Node* old_eden_top, Node* new_eden_top,
                                        Node* length) {
   enum { fall_in_path = 1, pf_path = 2 };
   if( UseTLAB && AllocatePrefetchStyle == 2 ) {
      Node *pf_region = new (C) RegionNode(3);
      Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY,
                                                TypeRawPtr::BOTTOM );
      Node *pf_phi_abio = new (C) PhiNode( pf_region, Type::ABIO );
      Node *thread = new (C) ThreadLocalNode();
      transform_later(thread);
      Node *eden_pf_adr = new (C) AddPNode( top()/*not oop*/, thread,
                   _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
      transform_later(eden_pf_adr);
      Node *old_pf_wm = new (C) LoadPNode(needgc_false,
                                   contended_phi_rawmem, eden_pf_adr,
                                   TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,
                                   MemNode::unordered);
      transform_later(old_pf_wm);
      Node *need_pf_cmp = new (C) CmpPNode( new_eden_top, old_pf_wm );
      transform_later(need_pf_cmp);
      Node *need_pf_bol = new (C) BoolNode( need_pf_cmp, BoolTest::ge );
      transform_later(need_pf_bol);
      IfNode *need_pf_iff = new (C) IfNode( needgc_false, need_pf_bol,
                                       PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
      transform_later(need_pf_iff);
      Node *need_pf_true = new (C) IfTrueNode( need_pf_iff );
      transform_later(need_pf_true);
      Node *need_pf_false = new (C) IfFalseNode( need_pf_iff );
      transform_later(need_pf_false);
      Node *new_pf_wmt = new (C) AddPNode( top(), old_pf_wm,
                                    _igvn.MakeConX(AllocatePrefetchDistance) );
      transform_later(new_pf_wmt );
      new_pf_wmt->set_req(0, need_pf_true);
      Node *store_new_wmt = new (C) StorePNode(need_pf_true,
                                       contended_phi_rawmem, eden_pf_adr,
                                       TypeRawPtr::BOTTOM, new_pf_wmt,
                                       MemNode::unordered);
      transform_later(store_new_wmt);
      pf_phi_abio->init_req( fall_in_path, i_o );
      Node *prefetch_adr;
      Node *prefetch;
      uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize;
      uint step_size = AllocatePrefetchStepSize;
      uint distance = 0;
      for ( uint i = 0; i < lines; i++ ) {
        prefetch_adr = new (C) AddPNode( old_pf_wm, new_pf_wmt,
                                            _igvn.MakeConX(distance) );
        transform_later(prefetch_adr);
        prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
        transform_later(prefetch);
        distance += step_size;
        i_o = prefetch;
      }
      pf_phi_abio->set_req( pf_path, i_o );
      pf_region->init_req( fall_in_path, need_pf_false );
      pf_region->init_req( pf_path, need_pf_true );
      pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem );
      pf_phi_rawmem->init_req( pf_path, store_new_wmt );
      transform_later(pf_region);
      transform_later(pf_phi_rawmem);
      transform_later(pf_phi_abio);
      needgc_false = pf_region;
      contended_phi_rawmem = pf_phi_rawmem;
      i_o = pf_phi_abio;
   } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
      Node *pf_region = new (C) RegionNode(3);
      Node *pf_phi_rawmem = new (C) PhiNode( pf_region, Type::MEMORY,
                                             TypeRawPtr::BOTTOM );
      uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
      uint step_size = AllocatePrefetchStepSize;
      uint distance = AllocatePrefetchDistance;
      Node *cache_adr = new (C) AddPNode(old_eden_top, old_eden_top,
                                            _igvn.MakeConX(distance));
      transform_later(cache_adr);
      cache_adr = new (C) CastP2XNode(needgc_false, cache_adr);
      transform_later(cache_adr);
      Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
      cache_adr = new (C) AndXNode(cache_adr, mask);
      transform_later(cache_adr);
      cache_adr = new (C) CastX2PNode(cache_adr);
      transform_later(cache_adr);
      Node *prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, cache_adr );
      prefetch->set_req(0, needgc_false);
      transform_later(prefetch);
      contended_phi_rawmem = prefetch;
      Node *prefetch_adr;
      distance = step_size;
      for ( uint i = 1; i < lines; i++ ) {
        prefetch_adr = new (C) AddPNode( cache_adr, cache_adr,
                                            _igvn.MakeConX(distance) );
        transform_later(prefetch_adr);
        prefetch = new (C) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr );
        transform_later(prefetch);
        distance += step_size;
        contended_phi_rawmem = prefetch;
      }
   } else if( AllocatePrefetchStyle > 0 ) {
      Node *prefetch_adr;
      Node *prefetch;
      uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
      uint step_size = AllocatePrefetchStepSize;
      uint distance = AllocatePrefetchDistance;
      for ( uint i = 0; i < lines; i++ ) {
        prefetch_adr = new (C) AddPNode( old_eden_top, new_eden_top,
                                            _igvn.MakeConX(distance) );
        transform_later(prefetch_adr);
        prefetch = new (C) PrefetchAllocationNode( i_o, prefetch_adr );
        if( i == 0 ) { // Set control for first prefetch, next follows it
          prefetch->init_req(0, needgc_false);
        }
        transform_later(prefetch);
        distance += step_size;
        i_o = prefetch;
      }
   }
   return i_o;
}
void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
  expand_allocate_common(alloc, NULL,
                         OptoRuntime::new_instance_Type(),
                         OptoRuntime::new_instance_Java());
}
void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
  Node* length = alloc->in(AllocateNode::ALength);
  InitializeNode* init = alloc->initialization();
  Node* klass_node = alloc->in(AllocateNode::KlassNode);
  ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
  address slow_call_address;  // Address of slow call
  if (init != NULL && init->is_complete_with_arraycopy() &&
      k->is_type_array_klass()) {
    slow_call_address = OptoRuntime::new_array_nozero_Java();
  } else {
    slow_call_address = OptoRuntime::new_array_Java();
  }
  expand_allocate_common(alloc, length,
                         OptoRuntime::new_array_Type(),
                         slow_call_address);
}
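// Mark the BoxLock of a lock on a non-escaping object as eliminated.
// If the box may also be used by locks on other objects (no
// EliminateNestedLocks and not a simple lock region), the box is cloned
// first so only the locks on this object see the eliminated box.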
void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
  if (oldbox->as_BoxLock()->is_eliminated())
    return; // This BoxLock node was processed already.
  if (EliminateNestedLocks ||
      oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj)) {
    _igvn.hash_delete(oldbox);
    oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value
    _igvn.hash_insert(oldbox);
    for (uint i = 0; i < oldbox->outcnt(); i++) {
      Node* u = oldbox->raw_out(i);
      if (u->is_AbstractLock() && !u->as_AbstractLock()->is_non_esc_obj()) {
        AbstractLockNode* alock = u->as_AbstractLock();
        if (alock->box_node() == oldbox) {
#ifdef ASSERT
          alock->log_lock_optimization(C, "eliminate_lock_set_non_esc4");
#endif
          alock->set_non_esc_obj();
        }
      }
    }
    return;
  }
  BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
  newbox->set_eliminated();
  transform_later(newbox);
  for (uint i = 0; i < oldbox->outcnt();) {
    bool next_edge = true;
    Node* u = oldbox->raw_out(i);
    if (u->is_AbstractLock()) {
      AbstractLockNode* alock = u->as_AbstractLock();
      if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
#ifdef ASSERT
        alock->log_lock_optimization(C, "eliminate_lock_set_non_esc5");
#endif
        alock->set_non_esc_obj();
        _igvn.rehash_node_delayed(alock);
        alock->set_box_node(newbox);
        next_edge = false;
      }
    }
    if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
      FastLockNode* flock = u->as_FastLock();
      assert(flock->box_node() == oldbox, "sanity");
      _igvn.rehash_node_delayed(flock);
      flock->set_box_node(newbox);
      next_edge = false;
    }
    if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
      SafePointNode* sfn = u->as_SafePoint();
      JVMState* youngest_jvms = sfn->jvms();
      int max_depth = youngest_jvms->depth();
      for (int depth = 1; depth <= max_depth; depth++) {
        JVMState* jvms = youngest_jvms->of_depth(depth);
        int num_mon  = jvms->nof_monitors();
        for (int idx = 0; idx < num_mon; idx++) {
          Node* obj_node = sfn->monitor_obj(jvms, idx);
          Node* box_node = sfn->monitor_box(jvms, idx);
          if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
            int j = jvms->monitor_box_offset(idx);
            _igvn.replace_input_of(u, j, newbox);
            next_edge = false;
          }
        }
      }
    }
    if (next_edge) i++;
  }
}
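// Mark locking nodes that can be eliminated: locks nested inside an
// enclosing lock on the same object (with EliminateNestedLocks) and all
// locks on objects that do not escape.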
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
  if (EliminateNestedLocks) {
    if (alock->is_nested()) {
       assert(alock->box_node()->as_BoxLock()->is_eliminated(), "sanity");
       return;
    } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened
      if (alock->jvms() != NULL) {
        if (alock->as_Lock()->is_nested_lock_region()) {
          Node* obj = alock->obj_node();
          BoxLockNode* box_node = alock->box_node()->as_BoxLock();
          assert(!box_node->is_eliminated(), "should not be marked yet");
          box_node->set_eliminated(); // Box's hash is always NO_HASH here
          for (uint i = 0; i < box_node->outcnt(); i++) {
            Node* u = box_node->raw_out(i);
            if (u->is_AbstractLock()) {
              alock = u->as_AbstractLock();
              if (alock->box_node() == box_node) {
                assert(alock->obj_node()->eqv_uncast(obj), "");
#ifdef ASSERT
                alock->log_lock_optimization(C, "eliminate_lock_set_nested");
#endif
                alock->set_nested();
              }
            }
          }
        } else {
#ifdef ASSERT
          alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region");
          if (C->log() != NULL)
            alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output
#endif
        }
      }
      return;
    }
    assert(alock->is_non_esc_obj(), "");
  } // EliminateNestedLocks
  if (alock->is_non_esc_obj()) { // Lock is used for non escaping object
    Node* obj = alock->obj_node();
    for (uint j = 0; j < obj->outcnt(); j++) {
      Node* o = obj->raw_out(j);
      if (o->is_AbstractLock() &&
          o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
        alock = o->as_AbstractLock();
        Node* box = alock->box_node();
        mark_eliminated_box(box, obj);
      }
    }
  }
}
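// Remove an eliminated Lock/Unlock node together with its FastLock
// input and the adjacent MemBarAcquireLock/MemBarReleaseLock, wiring
// the fall-through control and memory around them.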
bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
  if (!alock->is_eliminated()) {
    return false;
  }
#ifdef ASSERT
  if (!alock->is_coarsened()) {
    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
    assert(oldbox->is_eliminated(), "should be done already");
  }
#endif
  alock->log_lock_optimization(C, "eliminate_lock");
#ifndef PRODUCT
  if (PrintEliminateLocks) {
    if (alock->is_Lock()) {
      tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
    } else {
      tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
    }
  }
#endif
  Node* mem  = alock->in(TypeFunc::Memory);
  Node* ctrl = alock->in(TypeFunc::Control);
  extract_call_projections(alock);
  assert(alock->outcnt() == 2 &&
         _fallthroughproj != NULL &&
         _memproj_fallthrough != NULL,
         "Unexpected projections from Lock/Unlock");
  Node* fallthroughproj = _fallthroughproj;
  Node* memproj_fallthrough = _memproj_fallthrough;
  if (alock->is_Lock()) {
    MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar();
    assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, "");
    Node* ctrlproj = membar->proj_out(TypeFunc::Control);
    Node* memproj = membar->proj_out(TypeFunc::Memory);
    _igvn.replace_node(ctrlproj, fallthroughproj);
    _igvn.replace_node(memproj, memproj_fallthrough);
    Node* flock = alock->as_Lock()->fastlock_node();
    if (flock->outcnt() == 1) {
      assert(flock->unique_out() == alock, "sanity");
      _igvn.replace_node(flock, top());
    }
  }
  if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() &&
      ctrl->in(0)->is_MemBar()) {
    MemBarNode* membar = ctrl->in(0)->as_MemBar();
    assert(membar->Opcode() == Op_MemBarReleaseLock &&
           mem->is_Proj() && membar == mem->in(0), "");
    _igvn.replace_node(fallthroughproj, ctrl);
    _igvn.replace_node(memproj_fallthrough, mem);
    fallthroughproj = ctrl;
    memproj_fallthrough = mem;
    ctrl = membar->in(TypeFunc::Control);
    mem  = membar->in(TypeFunc::Memory);
  }
  _igvn.replace_node(fallthroughproj, ctrl);
  _igvn.replace_node(memproj_fallthrough, mem);
  return true;
}
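// Expand a LockNode into a FastLock test with a slow path call to the
// runtime monitor-enter stub; with UseOptoBiasInlining the biased
// locking protocol (bias check, revoke, rebias) is inlined as well.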
void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
  Node* ctrl = lock->in(TypeFunc::Control);
  Node* mem = lock->in(TypeFunc::Memory);
  Node* obj = lock->obj_node();
  Node* box = lock->box_node();
  Node* flock = lock->fastlock_node();
  assert(!box->as_BoxLock()->is_eliminated(), "sanity");
  Node *region;
  Node *mem_phi;
  Node *slow_path;
  if (UseOptoBiasInlining) {
    region  = new (C) RegionNode(5);
    mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
    Node* fast_lock_region  = new (C) RegionNode(3);
    Node* fast_lock_mem_phi = new (C) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM);
    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
    ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node,
                         markOopDesc::biased_lock_mask_in_place,
                         markOopDesc::biased_lock_pattern, true);
    fast_lock_mem_phi->init_req(1, mem);
    Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
    if (klass_node == NULL) {
      Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
      klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
#ifdef _LP64
      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
        assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
        klass_node->in(1)->init_req(0, ctrl);
      } else
#endif
      klass_node->init_req(0, ctrl);
    }
    Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
    Node* thread = transform_later(new (C) ThreadLocalNode());
    Node* cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
    Node* o_node = transform_later(new (C) OrXNode(cast_thread, proto_node));
    Node* x_node = transform_later(new (C) XorXNode(o_node, mark_node));
    Node* not_biased_ctrl =  opt_bits_test(ctrl, region, 3, x_node,
                                      (~markOopDesc::age_mask_in_place), 0);
    mem_phi->init_req(3, mem);
    ctrl =  opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node,
                          markOopDesc::biased_lock_mask_in_place, 0, true);
    not_biased_ctrl = fast_lock_region->in(2); // Slow path
    Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
    Node* cas = new (C) StoreXConditionalNode(not_biased_ctrl, mem, adr,
                                              proto_node, mark_node);
    transform_later(cas);
    Node* proj = transform_later( new (C) SCMemProjNode(cas));
    fast_lock_mem_phi->init_req(2, proj);
    Node* rebiased_region  = new (C) RegionNode(3);
    Node* old_phi = new (C) PhiNode( rebiased_region, TypeX_X);
    Node* new_phi = new (C) PhiNode( rebiased_region, TypeX_X);
    Node* epoch_ctrl =  opt_bits_test(ctrl, rebiased_region, 1, x_node,
                                      markOopDesc::epoch_mask_in_place, 0);
    rebiased_region->init_req(2, epoch_ctrl);
    old_phi->init_req(2, mark_node);
    new_phi->init_req(2, o_node);
    Node* cmask   = MakeConX(markOopDesc::biased_lock_mask_in_place |
                             markOopDesc::age_mask_in_place |
                             markOopDesc::epoch_mask_in_place);
    Node* old = transform_later(new (C) AndXNode(mark_node, cmask));
    cast_thread = transform_later(new (C) CastP2XNode(ctrl, thread));
    Node* new_mark = transform_later(new (C) OrXNode(cast_thread, old));
    old_phi->init_req(1, old);
    new_phi->init_req(1, new_mark);
    transform_later(rebiased_region);
    transform_later(old_phi);
    transform_later(new_phi);
    cas = new (C) StoreXConditionalNode(rebiased_region, mem, adr,
                                           new_phi, old_phi);
    transform_later(cas);
    proj = transform_later( new (C) SCMemProjNode(cas));
    not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0);
    mem_phi->init_req(4, proj);
    slow_path  = new (C) RegionNode(3);
    Node *slow_mem = new (C) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM);
    slow_path->init_req(1, not_biased_ctrl); // Capture slow-control
    slow_mem->init_req(1, proj);
    transform_later(fast_lock_region);
    transform_later(fast_lock_mem_phi);
    ctrl = opt_bits_test(fast_lock_region, region, 2, flock, 0, 0);
    mem_phi->init_req(2, fast_lock_mem_phi);
    slow_path->init_req(2, ctrl); // Capture slow-control
    slow_mem->init_req(2, fast_lock_mem_phi);
    transform_later(slow_path);
    transform_later(slow_mem);
    lock->set_req(TypeFunc::Memory, slow_mem);
  } else {
    region  = new (C) RegionNode(3);
    mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
    slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
    mem_phi->init_req(2, mem);
  }
  CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );
  extract_call_projections(call);
  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL, C);
  region->init_req(1, slow_ctrl);
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);
  Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj );
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}
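// Expand an UnlockNode into a FastUnlock test with a slow path call to
// the runtime monitor-exit stub; with UseOptoBiasInlining a biased mark
// word lets the unlock fall through immediately.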
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
  Node* ctrl = unlock->in(TypeFunc::Control);
  Node* mem = unlock->in(TypeFunc::Memory);
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();
  assert(!box->as_BoxLock()->is_eliminated(), "sanity");
  Node *region;
  Node *mem_phi;
  if (UseOptoBiasInlining) {
    region  = new (C) RegionNode(4);
    mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
    mem_phi->init_req(3, mem);
    Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
    ctrl = opt_bits_test(ctrl, region, 3, mark_node,
                         markOopDesc::biased_lock_mask_in_place,
                         markOopDesc::biased_lock_pattern);
  } else {
    region  = new (C) RegionNode(3);
    mem_phi = new (C) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  }
  FastUnlockNode *funlock = new (C) FastUnlockNode( ctrl, obj, box );
  funlock = transform_later( funlock )->as_FastUnlock();
  Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0);
  CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
  extract_call_projections(call);
  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL, C);
  region->init_req(1, slow_ctrl);
  transform_later(region);
  _igvn.replace_node(_fallthroughproj, region);
  Node *memproj = transform_later( new(C) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj );
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);
  _igvn.replace_node(_memproj_fallthrough, mem_phi);
}
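// Iterate over the macro node list and eliminate what we can: locks on
// non-escaping objects first, then non-escaping allocations and boxing
// calls, repeating until no further progress is made.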
void PhaseMacroExpand::eliminate_macro_nodes() {
  if (C->macro_count() == 0)
    return;
  int cnt = C->macro_count();
  for (int i=0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_AbstractLock()) { // Lock and Unlock nodes
      mark_eliminated_locking_nodes(n->as_AbstractLock());
    }
  }
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->is_AbstractLock()) {
        success = eliminate_locking_node(n->as_AbstractLock());
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  _has_locks = false;
  progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      switch (n->class_id()) {
      case Node::Class_Allocate:
      case Node::Class_AllocateArray:
        success = eliminate_allocate_node(n->as_Allocate());
        break;
      case Node::Class_CallStaticJava:
        success = eliminate_boxing_node(n->as_CallStaticJava());
        break;
      case Node::Class_Lock:
      case Node::Class_Unlock:
        assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
        _has_locks = true;
        break;
      default:
        assert(n->Opcode() == Op_LoopLimit ||
               n->Opcode() == Op_Opaque1   ||
               n->Opcode() == Op_Opaque2   ||
               n->Opcode() == Op_Opaque3, "unknown node type in macro list");
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
}
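// Eliminate what is still eliminable, then expand all remaining macro
// nodes into their subgraphs and run IGVN.  Returns true if the
// compilation must bail out, e.g. when the node budget of about 75
// nodes per macro node would be exceeded.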
bool PhaseMacroExpand::expand_macro_nodes() {
  eliminate_macro_nodes();
  if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) )
    return true;
  bool progress = true;
  while (progress) {
    progress = false;
    for (int i = C->macro_count(); i > 0; i--) {
      Node * n = C->macro_node(i-1);
      bool success = false;
      debug_only(int old_macro_count = C->macro_count(););
      if (n->Opcode() == Op_LoopLimit) {
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_CallStaticJava) {
        C->remove_macro_node(n);
        _igvn._worklist.push(n);
        success = true;
      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
        _igvn.replace_node(n, n->in(1));
        success = true;
#if INCLUDE_RTM_OPT
      } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
        assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
        assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
        Node* cmp = n->unique_out();
#ifdef ASSERT
        assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
        BoolNode* bol = cmp->unique_out()->as_Bool();
        assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
               (bol->_test._test == BoolTest::ne), "");
        IfNode* ifn = bol->unique_out()->as_If();
        assert((ifn->outcnt() == 2) &&
               ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change), "");
#endif
        Node* repl = n->in(1);
        if (!_has_locks) {
          repl = (cmp->in(1) == n) ? cmp->in(2) : cmp->in(1);
        }
        _igvn.replace_node(n, repl);
        success = true;
#endif
      }
      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
      progress = progress || success;
    }
  }
  while (C->macro_count() > 0) {
    int macro_count = C->macro_count();
    Node * n = C->macro_node(macro_count-1);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
      C->remove_macro_node(n); // node is dead; drop it without expanding
      continue;
    }
    switch (n->class_id()) {
    case Node::Class_Allocate:
      expand_allocate(n->as_Allocate());
      break;
    case Node::Class_AllocateArray:
      expand_allocate_array(n->as_AllocateArray());
      break;
    case Node::Class_Lock:
      expand_lock_node(n->as_Lock());
      break;
    case Node::Class_Unlock:
      expand_unlock_node(n->as_Unlock());
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
    if (C->failing())  return true;
  }
  _igvn.set_delay_transform(false);
  _igvn.optimize();
  if (C->failing())  return true;
  return false;
}
C:\hotspot-69087d08d473\src\share\vm/opto/macro.hpp
#ifndef SHARE_VM_OPTO_MACRO_HPP
#define SHARE_VM_OPTO_MACRO_HPP
#include "opto/phase.hpp"
class  AllocateNode;
class  AllocateArrayNode;
class  CallNode;
class  Node;
class  PhaseIterGVN;
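// Macro expansion phase: replaces macro nodes (Allocate, AllocateArray,
// Lock, Unlock, boxing calls) either by eliminating them when provably
// unnecessary or by expanding them into explicit runtime logic.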
class PhaseMacroExpand : public Phase {
private:
  PhaseIterGVN &_igvn;
  Node* top()                   const { return C->top(); }
  Node* intcon(jint con)        const { return _igvn.intcon(con); }
  Node* longcon(jlong con)      const { return _igvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _igvn.makecon(t); }
  Node* basic_plus_adr(Node* base, int offset) {
    return (offset == 0)? base: basic_plus_adr(base, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, Node* ptr, int offset) {
    return (offset == 0)? ptr: basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset) {
    Node* adr = new (C) AddPNode(base, ptr, offset);
    return transform_later(adr);
  }
  Node* transform_later(Node* n) {
    _igvn.register_new_node_with_optimizer(n);
    return n;
  }
  void set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr);
  Node* make_load( Node* ctl, Node* mem, Node* base, int offset,
                   const Type* value_type, BasicType bt);
  Node* make_store(Node* ctl, Node* mem, Node* base, int offset,
                   Node* value, BasicType bt);
  ProjNode *_fallthroughproj;
  ProjNode *_fallthroughcatchproj;
  ProjNode *_ioproj_fallthrough;
  ProjNode *_ioproj_catchall;
  ProjNode *_catchallcatchproj;
  ProjNode *_memproj_fallthrough;
  ProjNode *_memproj_catchall;
  ProjNode *_resproj;
  bool _has_locks;
  void expand_allocate(AllocateNode *alloc);
  void expand_allocate_array(AllocateArrayNode *alloc);
  void expand_allocate_common(AllocateNode* alloc,
                              Node* length,
                              const TypeFunc* slow_call_type,
                              address slow_call_address);
  Node *value_from_mem(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc);
  Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level);
  bool eliminate_boxing_node(CallStaticJavaNode *boxing);
  bool eliminate_allocate_node(AllocateNode *alloc);
  bool can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints);
  bool scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints_done);
  void process_users_of_allocation(CallNode *alloc);
  void eliminate_card_mark(Node *cm);
  void mark_eliminated_box(Node* box, Node* obj);
  void mark_eliminated_locking_nodes(AbstractLockNode *alock);
  bool eliminate_locking_node(AbstractLockNode *alock);
  void expand_lock_node(LockNode *lock);
  void expand_unlock_node(UnlockNode *unlock);
  int replace_input(Node *use, Node *oldref, Node *newref);
  void copy_call_debug_info(CallNode *oldcall, CallNode * newcall);
  Node* opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path = false);
  void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call);
  CallNode* make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call,
                       const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1);
  void extract_call_projections(CallNode *call);
  Node* initialize_object(AllocateNode* alloc,
                          Node* control, Node* rawmem, Node* object,
                          Node* klass_node, Node* length,
                          Node* size_in_bytes);
  Node* prefetch_allocation(Node* i_o,
                            Node*& needgc_false, Node*& contended_phi_rawmem,
                            Node* old_eden_top, Node* new_eden_top,
                            Node* length);
public:
  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) {
    _igvn.set_delay_transform(true);
  }
  void eliminate_macro_nodes();
  bool expand_macro_nodes();
};
#endif // SHARE_VM_OPTO_MACRO_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/matcher.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
OptoReg::Name OptoReg::c_frame_pointer;
const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size, mtCompiler),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);
  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_RegFlags] = NULL;
  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_RegFlags] = NULL;
  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_RegFlags] = NULL;
  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}
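// Map an incoming stack-slot argument from its VMReg location to an
// OptoReg name relative to _old_SP, growing _in_arg_limit as needed and
// bailing out if the slot cannot be represented in a RegMask.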
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}
#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif
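// Entry point of the matching phase: set up return-value and incoming
// argument masks from the calling convention, move the ideal graph into
// the old arena, and rebuild it as machine nodes via xform().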
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }
  const TypeTuple *domain = C->tf()->domain();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);
    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);
    _parm_regs[i].set_pair(reg2, reg1);
  }
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );
  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }
  if (C->failing())  return;  // bailed out on incoming arg failure
  find_shared( C->root() );
  find_shared( C->top() );
  C->print_method(PHASE_BEFORE_MATCHING);
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);
  Arena *old = C->node_arena()->move_contents(C->old_arena());
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }
  grow_new_node_array(C->unique());
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), live_nodes));
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      assert(_mach_null != NULL, "");
      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);
#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();
  NOT_DEBUG( old->destruct_contents() );
  Fixup_Save_On_Entry( );
}
static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}
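// Compute spill masks for every ideal register class: the registers of
// that class plus all representable stack slots (kept pair/set aligned
// for longs, doubles and vectors).  Debug masks are derived from these.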
void Matcher::init_first_stack_mask() {
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));
  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];
  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];
  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];
  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];
  OptoReg::Name i;
  C->FIRST_STACK_mask().Clear();
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  C->FIRST_STACK_mask().set_AllStack();
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
   if (UseFPUForSpilling) {
     idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
     idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
     idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
   }
  // Make up debug masks: start from the spill masks, then strip
  // callee-save registers (below) so debug info never names them.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  // Prevent stub compilations from referencing callee-saved registers
  // in debug info.
  bool exclude_soe = !Compile::current()->is_method_compilation();
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);
      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    (C->save_argument_registers() && is_spillable_arg(reg));
}
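// Make save-on-entry registers appear live across the method: give every
// exit node (Return, Rethrow, TailCall, TailJump, Halt) an extra input
// per saved register, fed by projections hung off the Start node.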
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();
  Node *root = C->root();       // Short name for root
  uint soe_cnt = number_of_saved_registers();
  uint i;
  StartNode *start = C->start();
  assert( start, "Expect a start node" );
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }
  int proj_cnt = C->tf()->domain()->cnt();
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];
      Node *mproj;
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
        is_save_on_entry(i+1) ) {
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        mproj = new (C) MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }
      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}
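// One-time initialization of register masks: build STACK_ONLY_mask and
// the per-register masks, then match a dummy load for each ideal class
// to learn which registers the AD file assigns to that class.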
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init
  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  STACK_ONLY_mask.set_AllStack();
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    mreg2regmask[i].Insert(i);
  }
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  set_shared(fp);
#ifdef _LP64
  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
  MachNode *spillI  = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
  MachNode *spillL  = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest,false));
  MachNode *spillF  = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
  MachNode *spillD  = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
  MachNode *spillP  = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
    MachNode *spillVectS = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
    idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    MachNode *spillVectD = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
    idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    MachNode *spillVectX = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
    idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    MachNode *spillVectY = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
    idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
  }
}
#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif
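// Visit states for the iterative graph walk in xform() below, plus a
// Node_Stack wrapper that interleaves (parent, input index) entries with
// the nodes being visited.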
enum Node_State { Pre_Visit,  // node has to be pre-visited
                      Visit,  // visit node
                 Post_Visit,  // post-visit node
             Alt_Post_Visit   // alternative post-visit path
                };
class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }
    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
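// Iterative worklist walk over the ideal graph: each old-space node is
// matched (or cloned, if the matcher does not care about it) into the
// new arena, and parent edges are rewired during post-visits.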
Node *Matcher::xform( Node *n, int max_stack ) {
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      if (!C->node_arena()->contains(n)) {
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }
          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
      int i;
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        mstack.push(m, Visit, n, -1);
      }
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }
    }
    else if (nstate == Post_Visit) {
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    warped = OptoReg::add(begin_out_arg_area, warped);
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
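// Match a SafePoint or Call node.  For calls, the matched MachCallNode
// also receives the ideal call's state (method, entry point, name) and
// per-argument register masks computed from the calling convention.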
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode      *mcall = NULL;
  uint               cnt;
  CallNode *call;
  const TypeTuple *domain;
  ciMethod*        method = NULL;
  bool             is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
    mcall = m->as_MachCall();
    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());
    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }
  msfpt->set_adr_type(sfpt->adr_type());
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    call->calling_convention( sig_bt, parm_regs, argcnt );
#ifdef ASSERT
    { for (int i = 0; i<argcnt; i++) {
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) continue;
      VMReg reg1 = parm_regs[i].first();
      VMReg reg2 = parm_regs[i].second();
      for (int j = 0; j < i; j++) {
        if( !parm_regs[j].first()->is_valid() &&
            !parm_regs[j].second()->is_valid() ) continue;
        VMReg reg3 = parm_regs[j].first();
        VMReg reg4 = parm_regs[j].second();
        if( !reg1->is_valid() ) {
          assert( !reg2->is_valid(), "valid halvsies" );
        } else if( !reg3->is_valid() ) {
          assert( !reg4->is_valid(), "valid halvsies" );
        } else {
          assert( reg1 != reg2, "calling conv. must produce distinct regs");
          assert( reg1 != reg3, "calling conv. must produce distinct regs");
          assert( reg1 != reg4, "calling conv. must produce distinct regs");
          assert( reg2 != reg3, "calling conv. must produce distinct regs");
          assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
          assert( reg3 != reg4, "calling conv. must produce distinct regs");
        }
      }
    }
    }
#endif
    for( i = 0; i < argcnt; i++ ) {
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) {
        continue;               // Avoid Halves
      }
      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments
    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
  }
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;
  if (mcall) {
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if (proj->_rout.is_NotEmpty()) {
      push_projection(proj);
    }
  }
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }
  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
  msfpt->_oop_map = sfpt->_oop_map;
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
      msfpt->jvms()->adapt_position(+1);
    }
  }
  return msfpt;
}
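// Match one instruction tree: label all sub-patterns bottom-up with
// Label_Root, choose the cheapest applicable instruction rule, reduce it
// to a MachNode, and re-attach edges the matcher does not inspect.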
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  ResourceMark rm( &_states_arena );
  LabelRootDepth = 0;
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1; // (Node*)1 means "no memory constraint yet"
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  Label_Root( n, s, n->in(0), mem );
  if (C->failing())  return NULL;
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
  for( i = 0; i < NUM_OPERANDS; i++ ) {
    if( s->valid(i) &&                // valid entry and
        s->_cost[i] < cost &&         // low cost and
        s->_rule[i] >= NUM_OPERANDS ) // not an operand
      cost = s->_cost[mincost=i];
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
    tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }
  MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
#ifdef ASSERT
  _old2new_map.map(n->_idx, m);
  _new2old_map.map(m->_idx, (Node*)n);
#endif
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }
  debug_only( _mem_node = save_mem_node; )
  return m;
}
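// Decide whether the input 'm' of 'n' must start its own match tree.
// Constants always fold; otherwise a short walk up the control chain
// checks that folding would not hoist 'm' above its controlling test.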
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
  const Type *t = m->bottom_type();
  if (t->singleton()) {
    return false;
  } else {                      // Not a constant
    Node* m_control = m->in(0);
    Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
    if (control && m_control && control != m_control && control != mem_control) {
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for (j=0; j<max_scan; j++) {
        if (x->is_Region())     // Bail out at merge points
          return true;
        x = x->in(0);
        if (x == m_control)     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
        if (x == mem_control)   // Does 'control' post-dominate
          break;                // mem_control?  If so, we can use it
      }
      if (j == max_scan)        // No post-domination before scan end?
        return true;            // Then break the match tree up
    }
    if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
        (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
      return false;
    }
  }
  return shared;
}
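// Bottom-up labeling of a match tree.  Shared inputs and loads whose
// memory state conflicts with a sibling are labeled as leaves of their
// own trees; everything else is folded in and labeled via the DFA.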
Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
  LabelRootDepth++;
  if (LabelRootDepth > MaxLabelRootDepth) {
    C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
    return NULL;
  }
  uint care = 0;                // Edges matcher cares about
  uint cnt = n->req();
  uint i = 0;
  Node *input_mem = NULL;
  for( i = 1; i < cnt; i++ ) {
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    assert( m, "expect non-null children" );
    if( m->is_Load() ) {
      if( input_mem == NULL ) {
        input_mem = m->in(MemNode::Memory);
      } else if( input_mem != m->in(MemNode::Memory) ) {
        input_mem = NodeSentinel;
      }
    }
  }
  for( i = 1; i < cnt; i++ ){// For my children
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    State *s = new (&_states_arena) State;
    svec->_kids[care++] = s;
    assert( care <= 2, "binary only for now" );
    s->_kids[0] = NULL;
    s->_kids[1] = NULL;
    s->_leaf = m;
    if( match_into_reg(n, m, control, i, is_shared(m)) ||
        ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
        (input_mem == NodeSentinel) ) {
#ifndef PRODUCT
      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
        tty->print_cr("invalid input_mem");
      }
#endif
      s->DFA( m->ideal_reg(), m );
    } else {
      if( control == NULL && m->in(0) != NULL && m->req() > 1 )
        control = m->in(0);         // Pick up control
      control = Label_Root(m,s,control,mem);
      if (C->failing()) return NULL;
    }
  }
  svec->DFA( n->Opcode(), n );
#ifdef ASSERT
  uint x;
  for( x = 0; x < _LAST_MACH_OPER; x++ )
    if( svec->valid(x) )
      break;
  if (x >= _LAST_MACH_OPER) {
    n->dump();
    svec->dump();
    assert( false, "bad AD file" );
  }
#endif
  return control;
}
MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
  if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
  if (_shared_nodes.Size() <= leaf->_idx) return NULL;
  MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
  if (last != NULL && rule == last->rule()) {
    if (leaf->is_DecodeNarrowPtr())
      return last;
    Node* xroot = new_node(C->root());
    if (xroot == NULL) {
      return NULL;
    }
    Node* control = last->in(0);
    if (control != xroot) {
      if (control == NULL || control == C->root()) {
        last->set_req(0, xroot);
      } else {
        assert(false, "unexpected control");
        return NULL;
      }
    }
    return last;
  }
  return NULL;
}
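// Turn a labeled state tree into a MachNode for the given rule: generate
// the result operand, reduce the interior, bind memory, and let the AD
// file's Expand rules post-process the node.  Constants and DecodeN
// nodes are cached in _shared_nodes so repeated matches reuse one copy.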
MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
  assert( rule >= NUM_OPERANDS, "called with operand rule" );
  MachNode* shared_node = find_shared_node(s->_leaf, rule);
  if (shared_node != NULL) {
    return shared_node;
  }
  MachNode *mach = s->MachNodeGenerator( rule, C );
  guarantee(mach != NULL, "Missing MachNode");
  mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
  assert( mach->_opnds[0] != NULL, "Missing result operand" );
  Node *leaf = s->_leaf;
  if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
    assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
           "duplicating node that's already been matched");
    mach->add_req( leaf->in(0) ); // Set initial control
    ReduceInst_Interior( s, rule, mem, mach, 1 );
  } else {
    mach->add_req(0);             // Set initial control to none
    ReduceInst_Chain_Rule( s, rule, mem, mach );
  }
  if( mem != (Node*)1 ) {
    mach->ins_req(MemNode::Memory,mem);
#ifdef ASSERT
    const MachOper* oper = mach->memory_operand();
    if (oper != NULL && oper != (MachOper*)-1) {
      Node* m = NULL;
      if (leaf->is_Mem()) {
        m = leaf;
      } else {
        m = _mem_node;
        assert(m != NULL && m->is_Mem(), "expecting memory node");
      }
      const Type* mach_at = mach->adr_type();
      if (m->adr_type() != mach_at &&
          (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
           m->in(MemNode::Address)->is_AddP() &&
           m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr() ||
           m->in(MemNode::Address)->is_AddP() &&
           m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
           m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr())) {
        mach_at = m->adr_type();
      }
      if (m->adr_type() != mach_at) {
        m->dump();
        tty->print_cr("mach:");
        mach->dump(1);
      }
      assert(m->adr_type() == mach_at, "matcher should not change adr type");
    }
#endif
  }
  if (leaf->is_AddP()) {
    mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
  }
  uint number_of_projections_prior = number_of_projections();
  MachNode *ex = mach->Expand(s, _projection_list, mem);
  if (ex != mach) {
    assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
    if( ex->in(1)->is_Con() )
      ex->in(1)->set_req(0, C->root());
    for( uint i=0; i<mach->req(); i++ ) {
      mach->set_req(i,NULL);
    }
#ifdef ASSERT
    _new2old_map.map(ex->_idx, s->_leaf);
#endif
  }
  if (_allocation_started) {
    guarantee(ex == mach, "no expand rules during spill generation");
    guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
  }
  if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
    _shared_nodes.map(leaf->_idx, ex);
  }
  return ex;
}
void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
  int op = _leftOp[rule];
  int opnd_class_instance = s->_rule[op];
  int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
  int newrule = s->_rule[catch_op];
  if( newrule < NUM_OPERANDS ) {
    assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
            "Bad AD file: Instruction chain rule must chain from operand");
    mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
    ReduceOper( s, newrule, mem, mach );
  } else {
    assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
    mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
    Node *mem1 = (Node*)1;
    debug_only(Node *save_mem_node = _mem_node;)
    mach->add_req( ReduceInst(s, newrule, mem1) );
    debug_only(_mem_node = save_mem_node;)
  }
  return;
}
uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
  if( s->_leaf->is_Load() ) {
    Node *mem2 = s->_leaf->in(MemNode::Memory);
    assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
    debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
    mem = mem2;
  }
  if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
    if( mach->in(0) == NULL )
      mach->set_req(0, s->_leaf->in(0));
  }
  for( uint i=0; i<2; i++ ) {   // binary tree
    State *newstate = s->_kids[i];
    if( newstate == NULL ) break;      // Might only have 1 child
    int op;
    if( i == 0 ) {
      op = _leftOp[rule];
    } else {
      op = _rightOp[rule];
    }
    int opnd_class_instance = newstate->_rule[op];
    int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
    int newrule = newstate->_rule[catch_op];
    if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
      mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
      ReduceOper( newstate, newrule, mem, mach );
    } else {                    // Child is internal operand or new instruction
      if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
        num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
      } else {
        mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
        Node *mem1 = (Node*)1;
        debug_only(Node *save_mem_node = _mem_node;)
        mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
        debug_only(_mem_node = save_mem_node;)
      }
    }
    assert( mach->_opnds[num_opnds-1], "" );
  }
  return num_opnds;
}
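// Reduce an operand subtree: edges of swallowed leaves are added directly
// to the instruction, while children that begin a new instruction are
// reduced recursively via ReduceInst.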
void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
  assert( rule < _LAST_MACH_OPER, "called with operand rule" );
  State *kid = s->_kids[0];
  assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
  if( kid == NULL && !_swallowed[rule] ) {
    mach->add_req( s->_leaf );  // Add leaf pointer
    return;                     // Bail out
  }
  if( s->_leaf->is_Load() ) {
    assert( mem == (Node*)1, "multiple Memories being matched at once?" );
    mem = s->_leaf->in(MemNode::Memory);
    debug_only(_mem_node = s->_leaf;)
  }
  if( s->_leaf->in(0) && s->_leaf->req() > 1) {
    if( !mach->in(0) )
      mach->set_req(0,s->_leaf->in(0));
    else {
      assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
    }
  }
  for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
    int newrule;
    if( i == 0)
      newrule = kid->_rule[_leftOp[rule]];
    else
      newrule = kid->_rule[_rightOp[rule]];
    if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
      ReduceOper( kid, newrule, mem, mach );
    } else {                    // Child is a new instruction
      Node *mem1 = (Node*)1;
      debug_only(Node *save_mem_node = _mem_node;)
      mach->add_req( ReduceInst( kid, newrule, mem1 ) );
      debug_only(_mem_node = save_mem_node;)
    }
  }
}
OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  calling_convention(&sig_bt, &regs, 1, is_outgoing);
  return OptoReg::as_OptoReg(regs.first());
}
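// Matches the fused BMI1 idioms that is_bmi_pattern() looks for below.
// The three shapes recognized (int forms shown; the long forms are
// analogous) are:
//   AndI(x, SubI(0, x))    i.e. x & -x      (BLSI: extract lowest set bit)
//   AndI(x, AddI(x, -1))   i.e. x & (x-1)   (BLSR: clear lowest set bit)
//   XorI(x, AddI(x, -1))   i.e. x ^ (x-1)   (BLSMSK: mask up to lowest set bit)
// The loaded value m must feed both sides of the pattern, and the outcnt
// checks ensure nothing else uses the intermediate nodes, so the load can
// be folded into a single BMI instruction.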
#ifdef X86
template<typename ConType>
class FusedPatternMatcher {
  Node* _op1_node;
  Node* _mop_node;
  int _con_op;
  static int match_next(Node* n, int next_op, int next_op_idx) {
    if (n->in(1) == NULL || n->in(2) == NULL) {
      return -1;
    }
    if (next_op_idx == -1) { // n is commutative, try rotations
      if (n->in(1)->Opcode() == next_op) {
        return 1;
      } else if (n->in(2)->Opcode() == next_op) {
        return 2;
      }
    } else {
      assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
      if (n->in(next_op_idx)->Opcode() == next_op) {
        return next_op_idx;
      }
    }
    return -1;
  }
public:
  FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
    _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
  bool match(int op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
             int op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
             typename ConType::NativeType con_value) {
    if (_op1_node->Opcode() != op1) {
      return false;
    }
    if (_mop_node->outcnt() > 2) {
      return false;
    }
    op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
    if (op1_op2_idx == -1) {
      return false;
    }
    int op1_mop_idx = (op1_op2_idx & 1) + 1;
    if (_op1_node->in(op1_mop_idx) == _mop_node) {
      Node *op2_node = _op1_node->in(op1_op2_idx);
      if (op2_node->outcnt() > 1) {
        return false;
      }
      assert(op2_node->Opcode() == op2, "Should be");
      op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
      if (op2_con_idx == -1) {
        return false;
      }
      int op2_mop_idx = (op2_con_idx & 1) + 1;
      if (op2_node->in(op2_mop_idx) == _mop_node) {
        const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
        if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
          return true;
        }
      }
    }
    return false;
  }
};
bool Matcher::is_bmi_pattern(Node *n, Node *m) {
  if (n != NULL && m != NULL) {
    if (m->Opcode() == Op_LoadI) {
      FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
      return bmii.match(Op_AndI, -1, Op_SubI,  1,  0)  ||
             bmii.match(Op_AndI, -1, Op_AddI, -1, -1)  ||
             bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
    } else if (m->Opcode() == Op_LoadL) {
      FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
      return bmil.match(Op_AndL, -1, Op_SubL,  1,  0) ||
             bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
             bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
    }
  }
  return false;
}
#endif // X86
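// Pre-matching pass over the ideal graph: set the visited/shared/dontcare
// bits that bound the instruction trees later handed to match_tree().  A
// node reachable along more than one path is flagged shared and becomes a
// tree root of its own, while address subtrees (AddP chains, shifts,
// ConvI2L) are deliberately left cloneable via address_visited so they can
// fold into complex addressing modes at each use.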
void Matcher::find_shared( Node *n ) {
  MStack mstack(C->live_nodes() * 2);
  VectorSet address_visited(Thread::current()->resource_area());
  mstack.push(n, Visit);     // Don't need to pre-visit root node
  while (mstack.is_nonempty()) {
    n = mstack.node();       // Leave node on stack
    Node_State nstate = mstack.state();
    uint nop = n->Opcode();
    if (nstate == Pre_Visit) {
      if (address_visited.test(n->_idx)) { // Visited in address already?
        set_visited(n);
      }
      if (is_visited(n)) {   // Visited already?
        set_shared(n);       // Flag as shared and
        if (n->is_DecodeNarrowPtr()) {
          set_shared(n->in(1));
        }
        mstack.pop();        // remove node from stack
        continue;
      }
      nstate = Visit; // Not already visited; so visit now
    }
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      set_visited(n);   // Flag as visited now
      bool mem_op = false;
      switch( nop ) {  // Handle some opcodes special
      case Op_Phi:             // Treat Phis as shared roots
      case Op_Parm:
      case Op_Proj:            // All handled specially during matching
      case Op_SafePointScalarObject:
        set_shared(n);
        set_dontcare(n);
        break;
      case Op_If:
      case Op_CountedLoopEnd:
        mstack.set_state(Alt_Post_Visit); // Alternative way
        mstack.push(n->in(1), Visit);         // Clone the Bool
        mstack.push(n->in(0), Pre_Visit);     // Visit control input
        continue; // while (mstack.is_nonempty())
      case Op_ConvI2D:         // These forms efficiently match with a prior
      case Op_ConvI2F:         //   Load but not a following Store
        if( n->in(1)->is_Load() &&        // Prior load
            n->outcnt() == 1 &&           // Not already shared
            n->unique_out()->is_Store() ) // Following store
          set_shared(n);       // Force it to be a root
        break;
      case Op_ReverseBytesI:
      case Op_ReverseBytesL:
        if( n->in(1)->is_Load() &&        // Prior load
            n->outcnt() == 1 )            // Not already shared
          set_shared(n);                  // Force it to be a root
        break;
      case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
      case Op_IfFalse:
      case Op_IfTrue:
      case Op_MachProj:
      case Op_MergeMem:
      case Op_Catch:
      case Op_CatchProj:
      case Op_CProj:
      case Op_JumpProj:
      case Op_JProj:
      case Op_NeverBranch:
        set_dontcare(n);
        break;
      case Op_Jump:
        mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
        mstack.push(n->in(0), Pre_Visit);     // Visit Control input
        continue;                             // while (mstack.is_nonempty())
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_AryEq:
      case Op_EncodeISOArray:
        set_shared(n); // Force result into register (it will be anyway)
        break;
      case Op_ConP: {  // Convert pointers above the centerline to NULL
        TypeNode *tn = n->as_Type(); // Constants derive from type nodes
        const TypePtr* tp = tn->type()->is_ptr();
        if (tp->_ptr == TypePtr::AnyNull) {
          tn->set_type(TypePtr::NULL_PTR);
        }
        break;
      }
      case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
        TypeNode *tn = n->as_Type(); // Constants derive from type nodes
        const TypePtr* tp = tn->type()->make_ptr();
        if (tp && tp->_ptr == TypePtr::AnyNull) {
          tn->set_type(TypeNarrowOop::NULL_PTR);
        }
        break;
      }
      case Op_Binary:         // These are introduced in the Post_Visit state.
        ShouldNotReachHere();
        break;
      case Op_ClearArray:
      case Op_SafePoint:
        mem_op = true;
        break;
      default:
        if( n->is_Store() ) {
          mem_op = true;
          break;
        }
        if( n->is_Mem() ) { // Loads and LoadStores
          mem_op = true;
          if( C->subsume_loads() == false )
            set_shared(n);
        }
        if( !n->ideal_reg() )
          set_dontcare(n);  // Unmatchable Nodes
      } // end_switch
      for(int i = n->req() - 1; i >= 0; --i) { // For my children
        Node *m = n->in(i); // Get ith input
        if (m == NULL) continue;  // Ignore NULLs
        uint mop = m->Opcode();
        if( _must_clone[mop] ) {
          mstack.push(m, Visit);
          continue; // for(int i = ...)
        }
#ifdef X86
        if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
          mstack.push(m, Visit);
          continue;
        }
#endif
        if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
          Node *off = m->in(AddPNode::Offset);
          if( off->is_Con() &&
              !is_visited(m) ) {
            address_visited.test_set(m->_idx); // Flag as address_visited
            Node *adr = m->in(AddPNode::Address);
            if( clone_shift_expressions && adr->is_AddP() &&
                !adr->in(AddPNode::Base)->is_top() &&
                !is_visited(adr) ) {
              address_visited.set(adr->_idx); // Flag as address_visited
              Node *shift = adr->in(AddPNode::Offset);
              if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
                  shift->in(2)->get_int() <= 3 &&
                  !is_visited(shift) ) {
                address_visited.set(shift->_idx); // Flag as address_visited
                mstack.push(shift->in(2), Visit);
                Node *conv = shift->in(1);
#ifdef _LP64
                if( conv->Opcode() == Op_ConvI2L &&
                    conv->as_Type()->type()->is_long()->_lo >= 0 &&
                    !is_visited(conv) ) {
                  address_visited.set(conv->_idx); // Flag as address_visited
                  mstack.push(conv->in(1), Pre_Visit);
                } else
#endif
                mstack.push(conv, Pre_Visit);
              } else {
                mstack.push(shift, Pre_Visit);
              }
              mstack.push(adr->in(AddPNode::Address), Pre_Visit);
              mstack.push(adr->in(AddPNode::Base), Pre_Visit);
            } else {  // Sparc, Alpha, PPC and friends
              mstack.push(adr, Pre_Visit);
            }
            mstack.push(off, Visit);
            mstack.push(m->in(AddPNode::Base), Pre_Visit);
            continue; // for(int i = ...)
          } // if( off->is_Con() )
        }   // if( mem_op &&
        mstack.push(m, Pre_Visit);
      }     // for(int i = ...)
    }
    else if (nstate == Alt_Post_Visit) {
      mstack.pop(); // Remove node from stack
      n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
    }
    else if (nstate == Post_Visit) {
      mstack.pop(); // Remove node from stack
      switch( n->Opcode() ) {       // Handle some opcodes special
      case Op_StorePConditional:
      case Op_StoreIConditional:
      case Op_StoreLConditional:
      case Op_CompareAndSwapI:
      case Op_CompareAndSwapL:
      case Op_CompareAndSwapP:
      case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
        Node *newval = n->in(MemNode::ValueIn );
        Node *oldval  = n->in(LoadStoreConditionalNode::ExpectedIn);
        Node *pair = new (C) BinaryNode( oldval, newval );
        n->set_req(MemNode::ValueIn,pair);
        n->del_req(LoadStoreConditionalNode::ExpectedIn);
        break;
      }
      case Op_CMoveD:              // Convert trinary to binary-tree
      case Op_CMoveF:
      case Op_CMoveI:
      case Op_CMoveL:
      case Op_CMoveN:
      case Op_CMoveP: {
        Node *pair1 = new (C) BinaryNode(n->in(1),n->in(1)->in(1));
        n->set_req(1,pair1);
        Node *pair2 = new (C) BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair2);
        n->del_req(3);
        break;
      }
      case Op_LoopLimit: {
        Node *pair1 = new (C) BinaryNode(n->in(1),n->in(2));
        n->set_req(1,pair1);
        n->set_req(2,n->in(3));
        n->del_req(3);
        break;
      }
      case Op_StrEquals: {
        Node *pair1 = new (C) BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair1);
        n->set_req(3,n->in(4));
        n->del_req(4);
        break;
      }
      case Op_StrComp:
      case Op_StrIndexOf: {
        Node *pair1 = new (C) BinaryNode(n->in(2),n->in(3));
        n->set_req(2,pair1);
        Node *pair2 = new (C) BinaryNode(n->in(4),n->in(5));
        n->set_req(3,pair2);
        n->del_req(5);
        n->del_req(4);
        break;
      }
      case Op_EncodeISOArray: {
        Node* pair = new (C) BinaryNode(n->in(3), n->in(4));
        n->set_req(3, pair);
        n->del_req(4);
        break;
      }
      default:
        break;
      }
    }
    else {
      ShouldNotReachHere();
    }
  } // end of while (mstack.is_nonempty())
}
#ifdef ASSERT
void Matcher::dump_old2new_map() {
  _old2new_map.dump();
}
#endif
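// Record If-projections that test a pointer against NULL, as candidate
// sites for the implicit-null-check optimization (replacing the explicit
// compare-and-branch with a memory access that traps on NULL).  Pushes
// (projection, tested value) pairs onto _null_check_tests; on LP64 a
// DecodeN feeding the test is tagged by setting the low bit of the node
// pointer so validate_null_checks() can recognize it later.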
void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
  Node *iff = proj->in(0);
  if( iff->Opcode() == Op_If ) {
    BoolNode *b = iff->in(1)->as_Bool();
    Node *cmp = iff->in(2);
    int opc = cmp->Opcode();
    if (opc != Op_CmpP && opc != Op_CmpN) return;
    const Type* ct = cmp->in(2)->bottom_type();
    if (ct == TypePtr::NULL_PTR ||
        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
      bool push_it = false;
      if( proj->Opcode() == Op_IfTrue ) {
        extern int all_null_checks_found;
        all_null_checks_found++;
        if( b->_test._test == BoolTest::ne ) {
          push_it = true;
        }
      } else {
        assert( proj->Opcode() == Op_IfFalse, "" );
        if( b->_test._test == BoolTest::eq ) {
          push_it = true;
        }
      }
      if( push_it ) {
        _null_check_tests.push(proj);
        Node* val = cmp->in(1);
#ifdef _LP64
        if (val->bottom_type()->isa_narrowoop() &&
            !Matcher::narrow_oop_use_complex_address()) {
          uint cnt = orig_proj->outcnt();
          for (uint i = 0; i < orig_proj->outcnt(); i++) {
            Node* d = orig_proj->raw_out(i);
            if (d->is_DecodeN() && d->in(1) == val) {
              val = d;
              val->set_req(0, NULL); // Unpin now.
              val = (Node*)(((intptr_t)val) | 1); // Tag the low bit: marks a DecodeN, untagged again in validate_null_checks().
              break;
            }
          }
        }
#endif
        _null_check_tests.push(val);
      }
    }
  }
}
void Matcher::validate_null_checks( ) {
  uint cnt = _null_check_tests.size();
  for( uint i=0; i < cnt; i+=2 ) {
    Node *test = _null_check_tests[i];
    Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
        new_val->set_req(0, NULL);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      _null_check_tests.map(i+1, new_val);
    } else {
      _null_check_tests.map(i+1,_null_check_tests[--cnt]);
      _null_check_tests.map(i,_null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
    }
  }
}
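// Returns true if the store-load barrier represented by 'vmb' is redundant
// because a following node on the same control already provides it: a
// MemBarVolatile, a CompareAndSwap (which carries barrier semantics), or a
// FastLock when biased locking is off.  Used to elide unnecessary barriers.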
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();
  Node* ctrl = NULL;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != NULL), "missing control projection");
  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();
    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI) {
      return true;
    }
    if ((xop == Op_FastLock) && !UseBiasedLocking) {
      return true;
    }
    if (x->is_MemBar()) {
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        return post_store_load_barrier(x->as_MemBar());
      }
    }
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}
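// Returns true if 'n' is an If whose false projection reaches, within a
// few control steps, a call into the uncommon trap blob with a deopt
// reason this compilation may still use, i.e. the branch is effectively
// never taken and can be treated as unlikely when laying out code.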
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;
  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();
  Node *ifFalse = NULL;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
  while (reg != NULL && cnt > 0) {
    CallNode *call = NULL;
    RegionNode *nxt_reg = NULL;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }
    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");
        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          return true;
        }
      }
    }
    reg = nxt_reg;
    cnt--;
  }
  return false;
}
State::State(void) {
#ifdef ASSERT
  _id = 0;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
#endif
  memset(_valid, 0, sizeof(_valid));
}
#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif
#ifndef PRODUCT
void State::dump() {
  tty->print("\n");
  dump(0);
}
void State::dump(int depth) {
  for( int j = 0; j < depth; j++ )
    tty->print("   ");
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for( i = 0; i < _LAST_MACH_OPER; i++ )
    if( valid(i) ) {
      for( int j = 0; j < depth; j++ )
        tty->print("   ");
      assert(_cost[i] != max_juint, "cost must be a valid value");
      assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s  %d  %s",
                    ruleName[i], _cost[i], ruleName[_rule[i]] );
    }
  tty->cr();
  for( i=0; i<2; i++ )
    if( _kids[i] )
      _kids[i]->dump(depth+1);
}
#endif
C:\hotspot-69087d08d473\src\share\vm/opto/matcher.hpp
#ifndef SHARE_VM_OPTO_MATCHER_HPP
#define SHARE_VM_OPTO_MATCHER_HPP
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/node.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
class Compile;
class Node;
class MachNode;
class MachTypeNode;
class MachOper;
class Matcher : public PhaseTransform {
  friend class VMStructs;
  ResourceArea _states_arena;
  VectorSet   _visited;         // Visit bits
  VectorSet   _shared;          // Shared Ideal Node
  VectorSet   _dontcare;        // Nothing the matcher cares about
  MachNode *ReduceInst( State *s, int rule, Node *&mem);
  void ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach);
  uint ReduceInst_Interior(State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds);
  void ReduceOper( State *s, int newrule, Node *&mem, MachNode *mach );
  MachNode* find_shared_node(Node* n, uint rule);
  const int *_reduceOp;
  const int *_leftOp;
  const int *_rightOp;
  const bool *_swallowed;
  const uint _begin_inst_chain_rule;
  const uint _end_inst_chain_rule;
  const char *_must_clone;
  void find_shared( Node *n );
#ifdef X86
  bool is_bmi_pattern(Node *n, Node *m);
#endif
  GrowableArray<Node_Notes*>* _old_node_note_array;
  Node *Label_Root( const Node *n, State *svec, Node *control, const Node *mem );
  Node *transform( Node *dummy );
  Node_List _projection_list;        // For Machine nodes killing many values
  Node_Array _shared_nodes;
  debug_only(Node_Array _old2new_map;)   // Map roots of ideal-trees to machine-roots
  debug_only(Node_Array _new2old_map;)   // Maps machine nodes back to ideal
  void   grow_new_node_array(uint idx_limit) {
    _nodes.map(idx_limit-1, NULL);
  }
  bool    has_new_node(const Node* n) const {
    return _nodes.at(n->_idx) != NULL;
  }
  Node*       new_node(const Node* n) const {
    assert(has_new_node(n), "set before get");
    return _nodes.at(n->_idx);
  }
  void    set_new_node(const Node* n, Node *nn) {
    assert(!has_new_node(n), "set only once");
    _nodes.map(n->_idx, nn);
  }
#ifdef ASSERT
  void verify_new_nodes_only(Node* root);
  Node* _mem_node;   // Ideal memory node consumed by mach node
#endif
  MachNode* _mach_null;
public:
  int LabelRootDepth;
  static const RegMask *idealreg2regmask[];
  RegMask *idealreg2spillmask  [_last_machine_leaf];
  RegMask *idealreg2debugmask  [_last_machine_leaf];
  RegMask *idealreg2mhdebugmask[_last_machine_leaf];
  void init_spill_mask( Node *ret );
  static uint mreg2regmask_max;
  static RegMask mreg2regmask[];
  static RegMask STACK_ONLY_mask;
  MachNode* mach_null() const { return _mach_null; }
  bool    is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
  void   set_shared( Node *n ) {  _shared.set(n->_idx); }
  bool   is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }
  void  set_visited( Node *n ) { _visited.set(n->_idx); }
  bool  is_dontcare( Node *n ) { return _dontcare.test(n->_idx) != 0; }
  void set_dontcare( Node *n ) {  _dontcare.set(n->_idx); }
  bool _allocation_started;
  static const char *regName[];
  static const unsigned char _regEncode[];
  const char **_ruleName;
  static const uint _begin_rematerialize;
  static const uint _end_rematerialize;
  const char *_register_save_policy;
  const char *_c_reg_save_policy;
  const int *_register_save_type;
  static bool can_be_java_arg( int reg );
  static bool is_spillable_arg( int reg );
  Node_List _null_check_tests;
  void collect_null_checks( Node *proj, Node *orig_proj );
  void validate_null_checks( );
  Matcher();
  Node* get_projection(uint pos) {
    return _projection_list[pos];
  }
  void push_projection(Node* node) {
    _projection_list.push(node);
  }
  Node* pop_projection() {
    return _projection_list.pop();
  }
  uint number_of_projections() const {
    return _projection_list.size();
  }
  void match();
  OptoReg::Name warp_incoming_stk_arg( VMReg reg );
  Node *xform( Node *old_space_node, int Nodes );
  MachNode *match_tree( const Node *n );
  MachNode *match_sfpt( SafePointNode *sfpt );
  OptoReg::Name warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call );
  void init_first_stack_mask();
  bool is_save_on_entry( int reg );
  void Fixup_Save_On_Entry( );
  OptoReg::Name _old_SP;
  OptoReg::Name _in_arg_limit;
  OptoReg::Name _new_SP;
  OptoReg::Name _out_arg_limit;
  OptoRegPair *_parm_regs;        // Array of machine registers per argument
  RegMask *_calling_convention_mask; // Array of RegMasks per argument
  static const bool has_match_rule(int opcode);
  static const bool _hasMatchRule[_last_opcode];
  static const bool match_rule_supported(int opcode);
  static const bool convL2FSupported(void);
  static const int vector_width_in_bytes(BasicType bt);
  static const int max_vector_size(const BasicType bt);
  static const int min_vector_size(const BasicType bt);
  static const bool vector_size_supported(const BasicType bt, int size) {
    return (Matcher::max_vector_size(bt) >= size &&
            Matcher::min_vector_size(bt) <= size);
  }
  static const uint vector_ideal_reg(int len);
  static const uint vector_shift_count_ideal_reg(int len);
  static const bool misaligned_vectors_ok();
  static const bool pass_original_key_for_aes();
  static const bool isSimpleConstant64(jlong con);
  virtual bool stack_direction() const;
  static uint stack_alignment_in_bytes();
  static uint stack_alignment_in_slots() {
    return stack_alignment_in_bytes() / (VMRegImpl::stack_slot_size);
  }
  static void calling_convention( BasicType *, VMRegPair *, uint len, bool is_outgoing );
  static OptoReg::Name  find_receiver( bool is_outgoing );
  virtual OptoReg::Name return_addr() const;
  RegMask              _return_addr_mask;
  static OptoRegPair   return_value(uint ideal_reg, bool is_outgoing);
  static OptoRegPair c_return_value(uint ideal_reg, bool is_outgoing);
  RegMask                     _return_value_mask;
  static OptoReg::Name  inline_cache_reg();
  static int            inline_cache_reg_encode();
  static RegMask divI_proj_mask();
  static RegMask modI_proj_mask();
  static RegMask divL_proj_mask();
  static RegMask modL_proj_mask();
  static bool use_asm_for_ldiv_by_con( jlong divisor );
  static const RegMask method_handle_invoke_SP_save_mask();
  static int  number_of_saved_registers();
  static OptoReg::Name  interpreter_method_oop_reg();
  static int            interpreter_method_oop_reg_encode();
  static OptoReg::Name  compiler_method_oop_reg();
  static const RegMask &compiler_method_oop_reg_mask();
  static int            compiler_method_oop_reg_encode();
  static OptoReg::Name  interpreter_frame_pointer_reg();
  static void c_calling_convention( BasicType*, VMRegPair *, uint );
  OptoReg::Name  c_frame_pointer() const;
  static RegMask c_frame_ptr_mask;
  virtual int      regnum_to_fpu_offset(int regnum);
  bool is_short_branch_offset(int rule, int br_size, int offset);
  static const bool init_array_count_is_in_bytes;
  static const int init_array_short_size;
  static const int long_cmove_cost();
  static const int float_cmove_cost();
  static const bool clone_shift_expressions;
  static bool narrow_oop_use_complex_address();
  static bool narrow_klass_use_complex_address();
  inline static bool gen_narrow_oop_implicit_null_checks() {
    return Universe::narrow_oop_use_implicit_null_checks() &&
           (narrow_oop_use_complex_address() ||
            Universe::narrow_oop_base() != NULL);
  }
  static const bool rematerialize_float_constants;
  static const bool misaligned_doubles_ok;
  static const bool require_postalloc_expand;
  void pd_implicit_null_fixup(MachNode *load, uint idx);
  static const bool strict_fp_requires_explicit_rounding;
  static bool float_in_double();
  static const bool int_in_long;
  static const bool need_masked_shift_count;
  static void soft_match_failure() {
    if( SoftMatchFailure ) return;
    else { fatal("SoftMatchFailure is not allowed except in product"); }
  }
  static bool post_store_load_barrier(const Node* mb);
  static bool branches_to_uncommon_trap(const Node *n);
#ifdef ASSERT
  void dump_old2new_map();      // machine-independent to machine-dependent
  Node* find_old_node(Node* new_node) {
    return _new2old_map[new_node->_idx];
  }
#endif
};
#endif // SHARE_VM_OPTO_MATCHER_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/mathexactnode.cpp
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/subnode.hpp"
template <typename OverflowOp>
class AddHelper {
public:
  typedef typename OverflowOp::TypeClass TypeClass;
  typedef typename TypeClass::NativeType NativeType;
  static bool will_overflow(NativeType value1, NativeType value2) {
    NativeType result = value1 + value2;
    if (((value1 ^ result) & (value2 ^ result)) >= 0) {
      return false;
    }
    return true;
  }
  static bool can_overflow(const Type* type1, const Type* type2) {
    if (type1 == TypeClass::ZERO || type2 == TypeClass::ZERO) {
      return false;
    }
    return true;
  }
};
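// For subtraction the corresponding test is: v1 - v2 overflows iff the
// operands have opposite signs and the result's sign differs from v1,
// i.e. ((v1 ^ v2) & (v1 ^ r)) < 0.  Same-signed operands can never
// overflow a subtract, and x - 0 never overflows (hence can_overflow's
// ZERO check).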
template <typename OverflowOp>
class SubHelper {
public:
  typedef typename OverflowOp::TypeClass TypeClass;
  typedef typename TypeClass::NativeType NativeType;
  static bool will_overflow(NativeType value1, NativeType value2) {
    NativeType result = value1 - value2;
    if (((value1 ^ value2) & (value1 ^ result)) >= 0) {
      return false;
    }
    return true;
  }
  static bool can_overflow(const Type* type1, const Type* type2) {
    if (type2 == TypeClass::ZERO) {
      return false;
    }
    return true;
  }
};
template <typename OverflowOp>
class MulHelper {
public:
  typedef typename OverflowOp::TypeClass TypeClass;
  static bool can_overflow(const Type* type1, const Type* type2) {
    if (type1 == TypeClass::ZERO || type2 == TypeClass::ZERO) {
      return false;
    } else if (type1 == TypeClass::ONE || type2 == TypeClass::ONE) {
      return false;
    }
    return true;
  }
};
bool OverflowAddINode::will_overflow(jint v1, jint v2) const {
  return AddHelper<OverflowAddINode>::will_overflow(v1, v2);
}
bool OverflowSubINode::will_overflow(jint v1, jint v2) const {
  return SubHelper<OverflowSubINode>::will_overflow(v1, v2);
}
bool OverflowMulINode::will_overflow(jint v1, jint v2) const {
  jlong result = (jlong) v1 * (jlong) v2;
  if ((jint) result == result) {
    return false;
  }
  return true;
}
bool OverflowAddLNode::will_overflow(jlong v1, jlong v2) const {
  return AddHelper<OverflowAddLNode>::will_overflow(v1, v2);
}
bool OverflowSubLNode::will_overflow(jlong v1, jlong v2) const {
  return SubHelper<OverflowSubLNode>::will_overflow(v1, v2);
}
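// 64-bit multiply has no wider integer type to widen into, so overflow is
// detected directly: 0 and 1 never overflow; min_jlong times anything else
// does; otherwise the multiply is done in unsigned arithmetic (well-defined
// wraparound) and verified by division -- if result / val2 != val1, the
// product wrapped.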
bool OverflowMulLNode::is_overflow(jlong val1, jlong val2) {
  if (val1 == 0 || val2 == 0 || val1 == 1 || val2 == 1) {
    return false;
  }
  if (val1 == min_jlong || val2 == min_jlong) {
    return true;
  }
  julong v1 = (julong) val1;
  julong v2 = (julong) val2;
  julong tmp = v1 * v2;
  jlong result = (jlong) tmp;
  if (result / val2 != val1) {
    return true;
  }
  return false;
}
bool OverflowAddINode::can_overflow(const Type* t1, const Type* t2) const {
  return AddHelper<OverflowAddINode>::can_overflow(t1, t2);
}
bool OverflowSubINode::can_overflow(const Type* t1, const Type* t2) const {
  if (in(1) == in(2)) {
    return false;
  }
  return SubHelper<OverflowSubINode>::can_overflow(t1, t2);
}
bool OverflowMulINode::can_overflow(const Type* t1, const Type* t2) const {
  return MulHelper<OverflowMulINode>::can_overflow(t1, t2);
}
bool OverflowAddLNode::can_overflow(const Type* t1, const Type* t2) const {
  return AddHelper<OverflowAddLNode>::can_overflow(t1, t2);
}
bool OverflowSubLNode::can_overflow(const Type* t1, const Type* t2) const {
  if (in(1) == in(2)) {
    return false;
  }
  return SubHelper<OverflowSubLNode>::can_overflow(t1, t2);
}
bool OverflowMulLNode::can_overflow(const Type* t1, const Type* t2) const {
  return MulHelper<OverflowMulLNode>::can_overflow(t1, t2);
}
const Type* OverflowNode::sub(const Type* t1, const Type* t2) const {
  fatal(err_msg_res("sub() should not be called for '%s'", NodeClassNames[this->Opcode()]));
  return TypeInt::CC;
}
template <typename OverflowOp>
struct IdealHelper {
  typedef typename OverflowOp::TypeClass TypeClass; // TypeInt, TypeLong
  typedef typename TypeClass::NativeType NativeType;
  static Node* Ideal(const OverflowOp* node, PhaseGVN* phase, bool can_reshape) {
    Node* arg1 = node->in(1);
    Node* arg2 = node->in(2);
    const Type* type1 = phase->type(arg1);
    const Type* type2 = phase->type(arg2);
    if (type1 == NULL || type2 == NULL) {
      return NULL;
    }
    if (type1 != Type::TOP && type1->singleton() &&
        type2 != Type::TOP && type2->singleton()) {
      NativeType val1 = TypeClass::as_self(type1)->get_con();
      NativeType val2 = TypeClass::as_self(type2)->get_con();
      if (node->will_overflow(val1, val2) == false) {
        Node* con_result = ConINode::make(phase->C, 0);
        return con_result;
      }
      return NULL;
    }
    return NULL;
  }
  static const Type* Value(const OverflowOp* node, PhaseTransform* phase) {
    const Type *t1 = phase->type( node->in(1) );
    const Type *t2 = phase->type( node->in(2) );
    if( t1 == Type::TOP ) return Type::TOP;
    if( t2 == Type::TOP ) return Type::TOP;
    const TypeClass* i1 = TypeClass::as_self(t1);
    const TypeClass* i2 = TypeClass::as_self(t2);
    if (i1 == NULL || i2 == NULL) {
      return TypeInt::CC;
    }
    if (t1->singleton() && t2->singleton()) {
      NativeType val1 = i1->get_con();
      NativeType val2 = i2->get_con();
      if (node->will_overflow(val1, val2)) {
        return TypeInt::CC;
      }
      return TypeInt::ZERO;
    } else if (i1 != TypeClass::TYPE_DOMAIN && i2 != TypeClass::TYPE_DOMAIN) {
      if (node->will_overflow(i1->_lo, i2->_lo)) {
        return TypeInt::CC;
      } else if (node->will_overflow(i1->_lo, i2->_hi)) {
        return TypeInt::CC;
      } else if (node->will_overflow(i1->_hi, i2->_lo)) {
        return TypeInt::CC;
      } else if (node->will_overflow(i1->_hi, i2->_hi)) {
        return TypeInt::CC;
      }
      return TypeInt::ZERO;
    }
    if (!node->can_overflow(t1, t2)) {
      return TypeInt::ZERO;
    }
    return TypeInt::CC;
  }
};
Node* OverflowINode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return IdealHelper<OverflowINode>::Ideal(this, phase, can_reshape);
}
Node* OverflowLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  return IdealHelper<OverflowLNode>::Ideal(this, phase, can_reshape);
}
const Type* OverflowINode::Value(PhaseTransform* phase) const {
  return IdealHelper<OverflowINode>::Value(this, phase);
}
const Type* OverflowLNode::Value(PhaseTransform* phase) const {
  return IdealHelper<OverflowLNode>::Value(this, phase);
}
C:\hotspot-69087d08d473\src\share\vm/opto/mathexactnode.hpp
#ifndef SHARE_VM_OPTO_MATHEXACTNODE_HPP
#define SHARE_VM_OPTO_MATHEXACTNODE_HPP
#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/addnode.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
class PhaseGVN;
class PhaseTransform;
class OverflowNode : public CmpNode {
public:
  OverflowNode(Node* in1, Node* in2) : CmpNode(in1, in2) {}
  virtual uint ideal_reg() const { return Op_RegFlags; }
  virtual const Type* sub(const Type* t1, const Type* t2) const;
};
class OverflowINode : public OverflowNode {
public:
  typedef TypeInt TypeClass;
  OverflowINode(Node* in1, Node* in2) : OverflowNode(in1, in2) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual const Type* Value(PhaseTransform* phase) const;
  virtual bool will_overflow(jint v1, jint v2) const = 0;
  virtual bool can_overflow(const Type* t1, const Type* t2) const = 0;
};
class OverflowLNode : public OverflowNode {
public:
  typedef TypeLong TypeClass;
  OverflowLNode(Node* in1, Node* in2) : OverflowNode(in1, in2) {}
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual const Type* Value(PhaseTransform* phase) const;
  virtual bool will_overflow(jlong v1, jlong v2) const = 0;
  virtual bool can_overflow(const Type* t1, const Type* t2) const = 0;
};
class OverflowAddINode : public OverflowINode {
public:
  typedef AddINode MathOp;
  OverflowAddINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jint v1, jint v2) const;
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
};
class OverflowSubINode : public OverflowINode {
public:
  typedef SubINode MathOp;
  OverflowSubINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jint v1, jint v2) const;
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
};
class OverflowMulINode : public OverflowINode {
public:
  typedef MulINode MathOp;
  OverflowMulINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jint v1, jint v2) const;
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
};
class OverflowAddLNode : public OverflowLNode {
public:
  typedef AddLNode MathOp;
  OverflowAddLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jlong v1, jlong v2) const;
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
};
class OverflowSubLNode : public OverflowLNode {
public:
  typedef SubLNode MathOp;
  OverflowSubLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jlong v1, jlong v2) const;
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
};
class OverflowMulLNode : public OverflowLNode {
public:
  typedef MulLNode MathOp;
  OverflowMulLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
  virtual int Opcode() const;
  virtual bool will_overflow(jlong v1, jlong v2) const { return is_overflow(v1, v2); }
  virtual bool can_overflow(const Type* t1, const Type* t2) const;
  static bool is_overflow(jlong v1, jlong v2);
};
#endif
C:\hotspot-69087d08d473\src\share\vm/opto/memnode.cpp
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);
uint MemNode::size_of() const { return sizeof(*this); }
const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}
bool MemNode::check_if_adr_maybe_raw(Node* adr) {
  if (adr != NULL) {
    if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) {
      return true;
    }
  }
  return false;
}
#ifndef PRODUCT
void MemNode::dump_spec(outputStream *st) const {
  if (in(Address) == NULL)  return; // node is dead
#ifndef ASSERT
  // MemNode::_adr_type is a debug-only field; in product builds recompute a
  // local of the same name from the address input instead.
  const TypePtr* _adr_type = NULL;
  if (in(Address) != NULL)
    _adr_type = in(Address)->bottom_type()->isa_ptr();
#endif
  dump_adr_type(this, _adr_type, st);
  Compile* C = Compile::current();
  if (C->alias_type(_adr_type)->is_volatile()) {
    st->print(" Volatile!");
  }
  if (_unaligned_access) {
    st->print(" unaligned");
  }
  if (_mismatched_access) {
    st->print(" mismatched");
  }
}
void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
  st->print(" @");
  if (adr_type == NULL) {
    st->print("NULL");
  } else {
    adr_type->dump_on(st);
    Compile* C = Compile::current();
    Compile::AliasType* atp = NULL;
    if (C->have_alias_type(adr_type))  atp = C->alias_type(adr_type);
    if (atp == NULL)
      st->print(", idx=?\?;");
    else if (atp->index() == Compile::AliasIdxBot)
      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}
extern void print_alias_types();
#endif
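// Walk up a raw memory chain, skipping memory states that provably cannot
// affect a load from a known-instance field (or from a freshly boxed
// value): calls that cannot modify it, Initialize nodes of unrelated
// allocations, MemBars and MergeMems.  Stops at the Start memory or at the
// instance's own allocation, which act as sentinels.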
Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) {
  assert((t_oop != NULL), "sanity");
  bool is_instance = t_oop->is_known_instance_field();
  bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() &&
                             (load != NULL) && load->is_Load() &&
                             (phase->is_IterGVN() != NULL);
  if (!(is_instance || is_boxed_value_load))
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = t_oop->instance_id();
  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_oop, phase)) { // returns false for instances
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        if ((alloc == NULL) || (alloc->_idx == instance_id)) {
          break;
        }
        if (is_instance) {
          result = proj_in->in(TypeFunc::Memory);
        } else if (is_boxed_value_load) {
          Node* klass = alloc->in(AllocateNode::KlassNode);
          const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr();
          if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) {
            result = proj_in->in(TypeFunc::Memory); // not related allocation
          }
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_ClearArray()) {
      if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) {
        break;
      }
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty);
    }
  }
  return result;
}
Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) {
  const TypeOopPtr* t_oop = t_adr->isa_oopptr();
  if (t_oop == NULL)
    return mchain;  // don't try to optimize non-oop types
  Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase);
  bool is_instance = t_oop->is_known_instance_field();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  if (is_instance && igvn != NULL  && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        (t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
         t->is_oopptr()->cast_to_exactness(true)
          ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
          ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop)) {
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
  }
  return result;
}
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
  uint alias_idx = phase->C->get_alias_index(tp);
  Node *mem = mmem;
#ifdef ASSERT
  {
    assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
    bool consistent =  adr_check == NULL || adr_check->empty() ||
                       phase->C->must_alias(adr_check, alias_idx );
    if( !consistent && adr_check != NULL && !adr_check->empty() &&
               tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
        adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
        ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
          adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
          adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
      consistent = true;
    }
    if( !consistent ) {
      st->print("alias_idx==%d, adr_check==", alias_idx);
      if( adr_check == NULL ) {
        st->print("NULL");
      } else {
        adr_check->dump();
      }
      st->cr();
      print_alias_types();
      assert(consistent, "adr_check must match alias idx");
    }
  }
#endif
  const TypeOopPtr *toop = tp->isa_oopptr();
  if( tp->base() != Type::AnyPtr &&
      !(toop &&
        toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot) ) {
    Node* m  = phase->transform(mmem);
    mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
  }
  return mem;
}
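// Common Ideal() preamble for memory nodes.  Removes a dead control edge
// if possible, and returns NodeSentinel (telling the caller to return
// NULL) when an input is dead or still pending on the IGVN worklist, so
// the node is re-examined after its inputs settle.  Also narrows a
// MergeMem input down to the single alias slice this access uses.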
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;
  ctl = in(MemNode::Control);
  if (ctl && ctl->is_top())  return NodeSentinel;
  PhaseIterGVN *igvn = phase->is_IterGVN();
  if (ctl && can_reshape && igvn != NULL) {
    Node* bol = NULL;
    Node* cmp = NULL;
    if (ctl->in(0)->is_If()) {
      assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity");
      bol = ctl->in(0)->in(1);
      if (bol->is_Bool())
        cmp = ctl->in(0)->in(1)->in(1);
    }
    if (igvn->_worklist.member(ctl) ||
        (bol != NULL && igvn->_worklist.member(bol)) ||
        (cmp != NULL && igvn->_worklist.member(cmp)) ) {
      phase->is_IterGVN()->_worklist.push(this);
      return NodeSentinel; // caller will return NULL
    }
  }
  Node *mem = in(MemNode::Memory);
  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
  assert(mem != this, "dead loop in MemNode::Ideal");
  if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }
  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL
  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
       (igvn->_worklist.size() > 0 && t_adr != adr_type()))) {
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }
  if (t_adr->isa_oopptr()) {
    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
  }
  Node* base = NULL;
  if (address->is_AddP()) {
    base = address->in(AddPNode::Base);
  }
  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
      !t_adr->isa_rawptr()) {
    return NodeSentinel; // caller will return NULL
  }
  Node* old_mem = mem;
  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();
    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }
  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);
    if (can_reshape && old_mem->outcnt() == 0) {
        igvn->_worklist.push(old_mem);
    }
    if (phase->type( mem ) == Type::TOP) return NodeSentinel;
    return this;
  }
  return NULL;
}
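// Conservatively decide whether every control input of 'dom' dominates
// 'sub'.  All dead-code cases, as well as sub == dom, answer false (the
// safe direction); otherwise dom's pinned inputs are walked and each CFG
// node must dominate 'sub' per Node::dominates().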
bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
  if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code
  dom = dom->find_exact_control(dom);
  if (dom == NULL || dom->is_top())
    return false; // Conservative answer for dead code
  if (dom == sub) {
    return false;
  }
  if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
    return true;
  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
         sub->is_Region() || sub->is_Call(), "expecting only these nodes");
  Node* orig_sub = sub;
  sub = sub->find_exact_control(sub->in(0));
  if (sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code
  assert(sub->is_CFG(), "expecting control");
  if (sub == dom)
    return true;
  if (sub->is_Start() || sub->is_Root())
    return false;
  {
    ResourceMark rm;
    Arena* arena = Thread::current()->resource_area();
    Node_List nlist(arena);
    Unique_Node_List dom_list(arena);
    dom_list.push(dom);
    bool only_dominating_controls = false;
    for (uint next = 0; next < dom_list.size(); next++) {
      Node* n = dom_list.at(next);
      if (n == orig_sub)
        return false; // One of dom's inputs dominated by sub.
      if (!n->is_CFG() && n->pinned()) {
        n = n->find_exact_control(n->in(0));
        if (n == NULL || n->is_top())
          return false; // Conservative answer for dead code
        assert(n->is_CFG(), "expecting control");
        dom_list.push(n);
      } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
        only_dominating_controls = true;
      } else if (n->is_CFG()) {
        if (n->dominates(sub, nlist))
          only_dominating_controls = true;
        else
          return false;
      } else {
        Node* m = n->find_exact_control(n->in(0));
        if (m != NULL) {
          if (m->is_top())
            return false; // Conservative answer for dead code
          dom_list.push(m);
        }
        uint cnt = n->req();
        for (uint i = 1; i < cnt; i++) {
          m = n->find_exact_control(n->in(i));
          if (m == NULL || m->is_top())
            continue;
          dom_list.push(m);
        }
      }
    }
    return only_dominating_controls;
  }
}
bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase) {
  if (a1 == NULL && a2 == NULL) {           // neither an allocation
    return (p1 != p2) && p1->is_Con() && p2->is_Con();
  } else if (a1 != NULL && a2 != NULL) {    // both allocations
    return (a1 != a2);
  } else if (a1 != NULL) {                  // one allocation a1
    return all_controls_dominate(p2, a1);
  } else { //(a2 != NULL)                   // one allocation a2
    return all_controls_dominate(p1, a2);
  }
  return false;
}
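// Search backward along the memory edge for a previous store to exactly
// this address.  Stores and allocations proven independent by offset
// ranges or by detect_ptr_independence() are stepped past -- the "(a)"
// steps annotated below -- and the matching store or Initialize is
// returned for the caller to fold; NULL means the chain became
// inscrutable or the cycle limiter ran out.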
Node* MemNode::find_previous_store(PhaseTransform* phase) {
  Node*         ctrl   = in(MemNode::Control);
  Node*         adr    = in(MemNode::Address);
  intptr_t      offset = 0;
  Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);
  if (offset == Type::OffsetBot)
    return NULL;            // cannot unalias unless there are precise offsets
  const bool adr_maybe_raw = check_if_adr_maybe_raw(adr);
  const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();
  intptr_t size_in_bytes = memory_size();
  Node* mem = in(MemNode::Memory);   // start searching here...
  int cnt = 50;             // Cycle limiter
  for (;;) {                // While we can dance past unrelated stores...
    if (--cnt < 0)  break;  // Caught in cycle or a complicated dance?
    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
      if (st_base == NULL)
        break;              // inscrutable pointer
      if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) {
        break;
      }
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          mem = mem->in(MemNode::Memory);
          continue;           // (a) advance through independent store memory
        }
      }
      if (st_base != base &&
          detect_ptr_independence(base, alloc,
                                  st_base,
                                  AllocateNode::Ideal_allocation(st_base, phase),
                                  phase)) {
        mem = mem->in(MemNode::Memory);
        continue;           // (a) advance through independent store memory
      }
      if (st_base == base && st_offset == offset) {
        return mem;         // let caller handle steps (c), (d)
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode*  st_alloc = st_init->allocation();
      if (st_alloc == NULL)
        break;              // something degenerated
      bool known_identical = false;
      bool known_independent = false;
      if (alloc == st_alloc)
        known_identical = true;
      else if (alloc != NULL)
        known_independent = true;
      else if (all_controls_dominate(this, st_alloc))
        known_independent = true;
      if (known_independent) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        if (alias_idx == Compile::AliasIdxRaw) {
          mem = st_alloc->in(TypeFunc::Memory);
        } else {
          mem = st_init->memory(alias_idx);
        }
        continue;           // (a) advance through independent store memory
      }
      if (known_identical) {
        return mem;         // let caller handle steps (c), (d)
      }
    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;         // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;           // (a) advance through independent MemBar memory
      } else if (mem->is_ClearArray()) {
        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
          continue;         // (a) advance through independent allocation memory
        } else {
          return mem;
        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;           // (a) advance through independent MergeMem memory
      }
    }
    break;
  }
  return NULL;              // bail out
}
const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
  if (t == Type::TOP)  return NULL; // does not touch memory any more?
  #ifdef PRODUCT
  cross_check = NULL;
  #else
  if (!VerifyAliases || is_error_reported() || Node::in_dump())  cross_check = NULL;
  #endif
  const TypePtr* tp = t->isa_ptr();
  if (tp == NULL) {
    assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
    return TypePtr::BOTTOM;           // touches lots of memory
  } else {
    #ifdef ASSERT
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
             "real address must not escape from expected memory type");
    }
    #endif
    return tp;
  }
}
bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
  ResourceMark rm;
  Unique_Node_List closure;
  closure.push(adr_phi->in(LoopNode::EntryControl));
  Unique_Node_List worklist;
  worklist.push(adr_phi);
  if( cast != NULL ){
    if( !cast->is_ConstraintCast() ) return false;
    worklist.push(cast);
  }
  while( worklist.size() ){
    Node *n = worklist.pop();
    if( !closure.member(n) ){
      closure.push(n);
      if( closure.size() > 20) return false;
      if( n->is_ConstraintCast() ){
        worklist.push(n->in(1));
      } else if( n->is_Phi() ) {
        for( uint i = 1; i < n->req(); i++ ) {
          worklist.push(n->in(i));
        }
      } else {
        return false;
      }
    }
  }
  return true;
}
Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
}
Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
  Node *skipped_cast = NULL;
  if( n->in(MemNode::Control) == NULL ) {
    while( true ) {
      switch( adr->Opcode() ) {
      case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
        adr = adr->in(AddPNode::Base);
        continue;
      case Op_DecodeN:         // No change to NULL-ness, so peek thru
      case Op_DecodeNKlass:
        adr = adr->in(1);
        continue;
      case Op_EncodeP:
      case Op_EncodePKlass:
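        // An EncodeP with a control edge is acting as a null-check cast:
        // pin the memory op to it.  Otherwise just peek through.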
        if (adr->in(0) == NULL) {
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;
      case Op_CastPP:
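        // A CastPP that does not narrow the type is useless: peek through,
        // remembering it in case a Phi is found above.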
        if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;
      case Op_Phi:
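        // Attempt to float above a Phi to some dominating point.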
        if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) {
          if (adr_phi_is_loop_invariant(adr, skipped_cast)) {
            adr = adr->in(1);
            continue;
          }
        }
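        // Intentional fall-through into the Op_CheckCastPP case below.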
      case Op_CheckCastPP:
        if (adr->in(0) == NULL) {
          skipped_cast = adr;
          adr = adr->in(1);
          continue;
        }
        ccp->hash_delete(n);
        n->set_req(MemNode::Control, adr->in(0));
        ccp->hash_insert(n);
        return n;
      case Op_CastX2P:          // no null checks on native pointers
      case Op_Parm:             // 'this' pointer is not null
      case Op_LoadP:            // Loading from within a klass
      case Op_LoadN:            // Loading from within a klass
      case Op_LoadKlass:        // Loading from within a klass
      case Op_LoadNKlass:       // Loading from within a klass
      case Op_ConP:             // Loading from a klass
      case Op_ConN:             // Loading from a klass
      case Op_ConNKlass:        // Loading from a klass
      case Op_CreateEx:         // Sucking up the guts of an exception oop
      case Op_Con:              // Reading from TLS
      case Op_CMoveP:           // CMoveP is pinned
      case Op_CMoveN:           // CMoveN is pinned
        break;                  // No progress
      case Op_Proj:             // Direct call to an allocation routine
      case Op_SCMemProj:        // Memory state from store conditional ops
#ifdef ASSERT
        {
          assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value");
          const Node* call = adr->in(0);
          if (call->is_CallJava()) {
            const CallJavaNode* call_java = call->as_CallJava();
            const TypeTuple *r = call_java->tf()->range();
            assert(r->cnt() > TypeFunc::Parms, "must return value");
            const Type* ret_type = r->field_at(TypeFunc::Parms);
            assert(ret_type && ret_type->isa_ptr(), "must return pointer");
          } else if (call->is_Allocate()) {
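            // An allocation returns a raw pointer to the new object.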
          } else if (!call->is_CallLeaf()) {
            ShouldNotReachHere();
          }
        }
#endif
        break;
      default:
        ShouldNotReachHere();
      }
      break;
    }
  }
  return  NULL;               // No progress
}
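// The control edge of a plain load may be removed when optimizations prove
// it redundant; subclasses that must stay pinned override this.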
bool LoadNode::can_remove_control() const {
  return true;
}
uint LoadNode::size_of() const { return sizeof(*this); }
uint LoadNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((LoadNode&)n)._type ); }
const Type *LoadNode::bottom_type() const { return _type; }
uint LoadNode::ideal_reg() const {
  return _type->ideal_reg();
}
#ifndef PRODUCT
void LoadNode::dump_spec(outputStream *st) const {
  MemNode::dump_spec(st);
  if( !Verbose && !WizardMode ) {
    st->print(" #"); _type->dump_on(st);
  }
  if (!_depends_only_on_test) {
    st->print(" (does not depend only on test)");
  }
}
#endif
#ifdef ASSERT
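// Helper: recognizes a raw load of an immutable thread-local value (the
// OS thread field off Op_ThreadLocal), which may safely omit a control edge.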
bool LoadNode::is_immutable_value(Node* adr) {
  return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() &&
          adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
          (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) ==
           in_bytes(JavaThread::osthread_offset())));
}
#endif
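// Polymorphic factory method.  Creates the LoadNode variant matching the
// basic type bt; on 64-bit with compressed oops, object loads become a
// LoadN feeding a DecodeN.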
Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo, ControlDependency control_dependency) {
  Compile* C = gvn.C;
  assert(!(adr_type->isa_oopptr() &&
           adr_type->offset() == oopDesc::klass_offset_in_bytes()),
         "use LoadKlassNode instead");
  assert(!(adr_type->isa_aryptr() &&
           adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
         "use LoadRangeNode instead");
  assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
  switch (bt) {
  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency);
  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency);
  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency);
  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency);
  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency);
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* load  = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency));
      return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
    } else
#endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo, control_dependency);
    }
  }
  ShouldNotReachHere();
  return (LoadNode*)NULL;
}
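// Factories for 64-bit loads that must be atomic even on 32-bit platforms
// (e.g. volatile long and double).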
LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
  bool require_atomic = true;
  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
}
LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo, ControlDependency control_dependency) {
  bool require_atomic = true;
  return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
}
uint LoadNode::hash() const {
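  // unroll addition of interesting fields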
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}
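// Returns true if the memory search below may skip over memory barriers:
// either boxing elimination is on and the field is non-volatile, or the
// load is from a stable array.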
static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();
    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }
  return false;
}
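// The service routine for load folding: given the prior memory state st
// seen by this load, return the value the load must observe, or NULL if
// it cannot be determined.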
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
  Node* ld_adr = in(MemNode::Address);
  intptr_t ld_off = 0;
  AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
  Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
    uint alias_idx = atp->index();
    bool final = !atp->is_rewritable();
    Node* result = NULL;
    Node* current = st;
    while (current->is_Proj()) {
      int opc = current->in(0)->Opcode();
      if ((final && (opc == Op_MemBarAcquire ||
                     opc == Op_MemBarAcquireLock ||
                     opc == Op_LoadFence)) ||
          opc == Op_MemBarRelease ||
          opc == Op_StoreFence ||
          opc == Op_MemBarReleaseLock ||
          opc == Op_MemBarCPUOrder) {
        Node* mem = current->in(0)->in(TypeFunc::Memory);
        if (mem->is_MergeMem()) {
          MergeMemNode* merge = mem->as_MergeMem();
          Node* new_st = merge->memory_at(alias_idx);
          if (new_st == merge->base_memory()) {
            current = new_st;
            continue;
          }
          result = new_st;
        }
      }
      break;
    }
    if (result != NULL) {
      st = result;
    }
  }
  // Loop around twice in the case Load -> Initialize -> Store.
  // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
  for (int trip = 0; trip <= 1; trip++) {
    if (st->is_Store()) {
      Node* st_adr = st->in(MemNode::Address);
      if (!phase->eqv(st_adr, ld_adr)) {
        // Try harder before giving up: match raw and non-raw pointers
        // that refer to the same offset in the same allocation.
        intptr_t st_off = 0;
        AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off);
        if (alloc == NULL)     return NULL;
        if (alloc != ld_alloc) return NULL;
        if (ld_off != st_off)  return NULL;
      }
      // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
      if (store_Opcode() != st->Opcode())
        return NULL;
      return st->in(MemNode::ValueIn);
    }
    // A load from a freshly-created object always returns zero.
    if (st->is_Proj() && st->in(0)->is_Allocate() &&
        (st->in(0) == ld_alloc) &&
        (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
      return phase->zerocon(memory_type());
    }
    // A load from an initialization barrier can match a captured store.
    if (st->is_Proj() && st->in(0)->is_Initialize()) {
      InitializeNode* init = st->in(0)->as_Initialize();
      AllocateNode* alloc = init->allocation();
      if ((alloc != NULL) && (alloc == ld_alloc)) {
        st = init->find_captured_store(ld_off, memory_size(), phase);
        if (st != NULL)
          continue;             // take one more trip around
      }
    }
    break;
  }
  return NULL;
}