

  // The Store can be captured only if nothing after the allocation
  // and before the Store is using the memory location that the store
  // overwrites.
  bool failed = false;
  // If is_complete_with_arraycopy() is true, the shape of the graph is
  // well defined and safe, so no extra checks are needed.
  if (!is_complete_with_arraycopy()) {
    // We are going to look at each use of the memory state following
    // the allocation to make sure nothing reads the memory that the
    // Store writes.
    const TypePtr* t_adr = phase->type(adr)->isa_ptr();
    int alias_idx = phase->C->get_alias_index(t_adr);
    ResourceMark rm;
    Unique_Node_List mems;
    mems.push(mem);
    Node* unique_merge = NULL;
    for (uint next = 0; next < mems.size(); ++next) {
      Node *m  = mems.at(next);
      for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
        Node *n = m->fast_out(j);
        if (n->outcnt() == 0) {
          continue;
        }
        if (n == st) {
          continue;
        } else if (n->in(0) != NULL && n->in(0) != ctl) {
          // If the control of this use is different from the control
          // of the Store which is right after the InitializeNode then
          // this node cannot be between the InitializeNode and the
          // Store.
          continue;
        } else if (n->is_MergeMem()) {
          if (n->as_MergeMem()->memory_at(alias_idx) == m) {
            // We can hit a MergeMemNode (that will likely go away
            // later) that is a direct use of the memory state
            // following the InitializeNode on the same slice as the
            // store node that we'd like to capture. We need to check
            // the uses of the MergeMemNode.
            mems.push(n);
          }
        } else if (n->is_Mem()) {
          Node* other_adr = n->in(MemNode::Address);
          if (other_adr == adr) {
            failed = true;
            break;
          } else {
            const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
            if (other_t_adr != NULL) {
              int other_alias_idx = phase->C->get_alias_index(other_t_adr);
              if (other_alias_idx == alias_idx) {
                // A load from the same memory slice as the store right
                // after the InitializeNode. We check the control of the
                // object/array that is loaded from. If it's the same as
                // the store control then we cannot capture the store.
                assert(!n->is_Store(), "2 stores to same slice on same control?");
                Node* base = other_adr;
                assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name()));
                base = base->in(AddPNode::Base);
                if (base != NULL) {
                  base = base->uncast();
                  if (base->is_Proj() && base->in(0) == alloc) {
                    failed = true;
                    break;
                  }
                }
              }
            }
          }
        } else {
          failed = true;
          break;
        }
      }
    }
  }
  if (failed) {
    if (!can_reshape) {
      // We decided we couldn't capture the store during parsing. We
      // should try again during the next IGVN once the graph is
      // cleaner.
      phase->C->record_for_igvn(st);
    }
    return FAIL;
  }

  return offset;                // success
}

// Find the captured store in(i) which corresponds to the range
// [start..start+size) in the initialized object.
// If there is one, return its index i.  If there isn't, return the
// negative of the index where it should be inserted.
// Return 0 if the queried range overlaps an initialization boundary
// or if dead code is encountered.
// If size_in_bytes is zero, do not bother with overlap checks.
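// For example (hypothetical offsets): with captured 4-byte stores at offsets
// 12 and 16, querying start=16 with size 4 returns the index of that store;
// querying start=24 returns the negative of the index where a new store
// would go; and querying start=14 with size 4 returns 0 (FAIL), since
// [14..18) overlaps both neighbors.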
int InitializeNode::captured_store_insertion_point(intptr_t start,
                                                   int size_in_bytes,
                                                   PhaseTransform* phase) {
  const int FAIL = 0, MAX_STORE = BytesPerLong;

  if (is_complete())
    return FAIL;                // arraycopy got here first; punt

  assert(allocation() != NULL, "must be present");

  // no negatives, no header fields:
  if (start < (intptr_t) allocation()->minimum_header_size())  return FAIL;

  // after a certain size, we bail out on tracking all the stores:
  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  if (start >= ti_limit)  return FAIL;

  for (uint i = InitializeNode::RawStores, limit = req(); ; ) {
    if (i >= limit)  return -(int)i; // not found; here is where to put it

    Node*    st     = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0) {
      if (st != zero_memory()) {
        return FAIL;            // bail out if there is dead garbage
      }
    } else if (st_off > start) {
      // ...we are done, since stores are ordered
      if (st_off < start + size_in_bytes) {
        return FAIL;            // the next store overlaps
      }
      return -(int)i;           // not found; here is where to put it
    } else if (st_off < start) {
      if (size_in_bytes != 0 &&
          start < st_off + MAX_STORE &&
          start < st_off + st->as_Store()->memory_size()) {
        return FAIL;            // the previous store overlaps
      }
    } else {
      if (size_in_bytes != 0 &&
          st->as_Store()->memory_size() != size_in_bytes) {
        return FAIL;            // mismatched store size
      }
      return i;
    }

    ++i;
  }
}

// Look for a captured store which initializes at the offset 'start'
// with the given size.  If there is no such store, and no other
// initialization interferes, then return zero_memory (the memory
// projection of the AllocateNode).
Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
                                          PhaseTransform* phase) {
  assert(stores_are_sane(phase), "");
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0) {
    return NULL;                // something is dead
  } else if (i < 0) {
    return zero_memory();       // just primordial zero bits here
  } else {
    Node* st = in(i);           // here is the store at this position
    assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
    return st;
  }
}

// Create, as a raw pointer, an address within my new object at 'offset'.
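// For instance, offset 12 yields (AddP top RawAddress 12), the "(+ rawoop 12)"
// form that appears in the capture_store example below.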
Node* InitializeNode::make_raw_address(intptr_t offset,
                                       PhaseTransform* phase) {
  Node* addr = in(RawAddress);
  if (offset != 0) {
    Compile* C = phase->C;
    addr = phase->transform( new (C) AddPNode(C->top(), addr,
                                                 phase->MakeConX(offset)) );
  }
  return addr;
}

// Clone the given store, converting it into a raw store
// initializing a field or element of my new object.
// Caller is responsible for retiring the original store,
// with subsume_node or the like.
//
// From the example above InitializeNode::InitializeNode,
// here are the old stores to be captured:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//
// Here is the changed code; note the extra edges on init:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
//   rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
//   init = (Initialize alloc.Control alloc.Memory rawoop
//                      rawstore1 rawstore2)
//
Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
                                    PhaseTransform* phase, bool can_reshape) {
  assert(stores_are_sane(phase), "");

  if (start < 0)  return NULL;
  assert(can_capture_store(st, phase, can_reshape) == start, "sanity");

  Compile* C = phase->C;
  int size_in_bytes = st->memory_size();
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0)  return NULL;     // bail out
  Node* prev_mem = NULL;        // raw memory for the captured store
  if (i > 0) {
    prev_mem = in(i);           // there is a pre-existing store under this one
    set_req(i, C->top());       // temporarily disconnect it
    // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect.
  } else {
    i = -i;                     // no pre-existing store
    prev_mem = zero_memory();   // a slice of the newly allocated object
    if (i > InitializeNode::RawStores && in(i-1) == prev_mem)
      set_req(--i, C->top());   // reuse this edge; it has been folded away
    else
      ins_req(i, C->top());     // build a new edge
  }
  Node* new_st = st->clone();
  new_st->set_req(MemNode::Control, in(Control));
  new_st->set_req(MemNode::Memory,  prev_mem);
  new_st->set_req(MemNode::Address, make_raw_address(start, phase));
  new_st = phase->transform(new_st);

  // At this point, new_st might have swallowed a pre-existing store
  // at the same offset, or perhaps new_st might have disappeared,
  // if it redundantly stored the same value (or zero to fresh memory).

  // In any case, wire it in:
  set_req(i, new_st);

  // The caller may now kill the old guy.
  DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase));
  assert(check_st == new_st || check_st == NULL, "must be findable");
  assert(!is_complete(), "");
  return new_st;
}

static bool store_constant(jlong* tiles, int num_tiles,
                           intptr_t st_off, int st_size,
                           jlong con) {
  if ((st_off & (st_size-1)) != 0)
    return false;               // strange store offset (assume size==2**N)
  address addr = (address)tiles + st_off;
  assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob");
  switch (st_size) {
  case sizeof(jbyte):  *(jbyte*) addr = (jbyte) con; break;
  case sizeof(jchar):  *(jchar*) addr = (jchar) con; break;
  case sizeof(jint):   *(jint*)  addr = (jint)  con; break;
  case sizeof(jlong):  *(jlong*) addr = (jlong) con; break;
  default: return false;        // strange store size (detect size!=2**N here)
  }
  return true;                  // return success to caller
}
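
// For example (hypothetical values): with num_tiles == 2,
//   store_constant(tiles, 2, /*st_off*/ 12, sizeof(jint), 0x00010002)
// writes the int 0x00010002 at byte offset 12 (the upper or lower half of
// tiles[1], depending on endianness) and returns true, while a 4-byte store
// at st_off == 13 returns false because the offset is misaligned.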

// Coalesce subword constants into int constants and possibly
// into long constants.  The goal, if the CPU permits,
// is to initialize the object with a small number of 64-bit tiles.
// Also, convert floating-point constants to bit patterns.
// Non-constants are not relevant to this pass.
//
// In terms of the running example on InitializeNode::InitializeNode
// and InitializeNode::capture_store, here is the transformation
// of rawstore1 and rawstore2 into rawstore12:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   tile12 = 0x00010002
//   rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12)
//   init = (Initialize alloc.Control alloc.Memory rawoop rawstore12)
//
void
InitializeNode::coalesce_subword_stores(intptr_t header_size,
                                        Node* size_in_bytes,
                                        PhaseGVN* phase) {
  Compile* C = phase->C;

  assert(stores_are_sane(phase), "");
  // Note:  After this pass, they are not completely sane,
  // since there may be some overlaps.

  int old_subword = 0, old_long = 0, new_int = 0, new_long = 0;

  intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
  intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
  size_limit = MIN2(size_limit, ti_limit);
  size_limit = align_size_up(size_limit, BytesPerLong);
  int num_tiles = size_limit / BytesPerLong;

  // allocate space for the tile map:
  const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small
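  // (DEBUG_ONLY makes small_len 3 in debug builds, so the resource-array
  // path below is exercised routinely; product builds use 30.)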
  jlong  tiles_buf[small_len];
  Node*  nodes_buf[small_len];
  jlong  inits_buf[small_len];
  jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0]
                  : NEW_RESOURCE_ARRAY(Node*, num_tiles));
  jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0]
                  : NEW_RESOURCE_ARRAY(jlong, num_tiles));
  // tiles: exact bitwise model of all primitive constants
  // nodes: last constant-storing node subsumed into the tiles model
  // inits: which bytes (in each tile) are touched by any initializations

  // Pass A: Fill in the tile model with any relevant stores.

  Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles);
  Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles);
  Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles);
  Node* zmem = zero_memory(); // initially zero memory state
  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);

    // Figure out the store's offset and constant value:
    if (st_off < header_size)             continue; //skip (ignore header)
    if (st->in(MemNode::Memory) != zmem)  continue; //skip (odd store chain)
    int st_size = st->as_Store()->memory_size();
    if (st_off + st_size > size_limit)    break;

    // Record which bytes are touched, whether by constant or not.
    if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1))
      continue;                 // skip (strange store size)

    const Type* val = phase->type(st->in(MemNode::ValueIn));
    if (!val->singleton())                continue; //skip (non-con store)
    BasicType type = val->basic_type();

    jlong con = 0;
    switch (type) {
    case T_INT:    con = val->is_int()->get_con();  break;
    case T_LONG:   con = val->is_long()->get_con(); break;
    case T_FLOAT:  con = jint_cast(val->getf());    break;
    case T_DOUBLE: con = jlong_cast(val->getd());   break;
    default:                              continue; //skip (odd store type)
    }

    if (type == T_LONG && Matcher::isSimpleConstant64(con) &&
        st->Opcode() == Op_StoreL) {
      continue;                 // This StoreL is already optimal.
    }

    // Store down the constant.
    store_constant(tiles, num_tiles, st_off, st_size, con);

    intptr_t j = st_off >> LogBytesPerLong;
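    // j is the index of the 64-bit tile containing this store.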

    if (type == T_INT && st_size == BytesPerInt
        && (st_off & BytesPerInt) == BytesPerInt) {
      jlong lcon = tiles[j];
      if (!Matcher::isSimpleConstant64(lcon) &&
          st->Opcode() == Op_StoreI) {
        // This StoreI is already optimal by itself.
        jint* intcon = (jint*) &tiles[j];
        intcon[1] = 0;  // undo the store_constant()

        // If the previous store is also optimal by itself, back up and
        // undo the action of the previous loop iteration... if we can.
        // But if we can't, just let the previous half take care of itself.
        st = nodes[j];
        st_off -= BytesPerInt;
        con = intcon[0];
        if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) {
          assert(st_off >= header_size, "still ignoring header");
          assert(get_store_offset(st, phase) == st_off, "must be");
          assert(in(i-1) == zmem, "must be");
          DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn)));
          assert(con == tcon->is_int()->get_con(), "must be");
          // Undo the effects of the previous loop trip, which swallowed st:
          intcon[0] = 0;        // undo store_constant()
          set_req(i-1, st);     // undo set_req(i, zmem)
          nodes[j] = NULL;      // undo nodes[j] = st
          --old_subword;        // undo ++old_subword
        }
        continue;               // This StoreI is already optimal.
      }
    }

    // This store is not needed.
    set_req(i, zmem);
    nodes[j] = st;              // record for the moment
    if (st_size < BytesPerLong) // something has changed
          ++old_subword;        // includes int/float, but who's counting...
    else  ++old_long;
  }

  if ((old_subword + old_long) == 0)
    return;                     // nothing more to do

  // Pass B: Convert any non-zero tiles into optimal constant stores.
  // Be sure to insert them before overlapping non-constant stores.
  // (E.g., byte[] x = { 1,2,y,4 }  =>  x[int 0] = 0x01020004, x[2]=y.)
  for (int j = 0; j < num_tiles; j++) {
    jlong con  = tiles[j];
    jlong init = inits[j];
    if (con == 0)  continue;
    jint con0,  con1;           // split the constant, address-wise
    jint init0, init1;          // split the init map, address-wise
    { union { jlong con; jint intcon[2]; } u;
      u.con = con;
      con0  = u.intcon[0];
      con1  = u.intcon[1];
      u.con = init;
      init0 = u.intcon[0];
      init1 = u.intcon[1];
    }

    Node* old = nodes[j];
    assert(old != NULL, "need the prior store");
    intptr_t offset = (j * BytesPerLong);

    bool split = !Matcher::isSimpleConstant64(con);

    if (offset < header_size) {
      assert(offset + BytesPerInt >= header_size, "second int counts");
      assert(*(jint*)&tiles[j] == 0, "junk in header");
      split = true;             // only the second word counts
      // Example:  int a[] = { 42 ... }
    } else if (con0 == 0 && init0 == -1) {
      split = true;             // first word is covered by full inits
      // Example:  int a[] = { ... foo(), 42 ... }
    } else if (con1 == 0 && init1 == -1) {
      split = true;             // second word is covered by full inits
      // Example:  int a[] = { ... 42, foo() ... }
    }

    // Here's a case where init0 is neither 0 nor -1:
    //   byte a[] = { ... 0,0,foo(),0,  0,0,0,42 ... }
    // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF.
    // In this case the tile is not split; it is (jlong)42.
    // The big tile is stored down, and then the foo() value is inserted.
    // (If there were foo(),foo() instead of foo(),0, init0 would be -1.)

    Node* ctl = old->in(MemNode::Control);
    Node* adr = make_raw_address(offset, phase);
    const TypePtr* atp = TypeRawPtr::BOTTOM;

    // One or two coalesced stores to plop down.
    Node*    st[2];
    intptr_t off[2];
    int  nst = 0;
    if (!split) {
      ++new_long;
      off[nst] = offset;
      st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                  phase->longcon(con), T_LONG, MemNode::unordered);
    } else {
      // Omit either if it is a zero.
      if (con0 != 0) {
        ++new_int;
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con0), T_INT, MemNode::unordered);
      }
      if (con1 != 0) {
        ++new_int;
        offset += BytesPerInt;
        adr = make_raw_address(offset, phase);
        off[nst]  = offset;
        st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
                                    phase->intcon(con1), T_INT, MemNode::unordered);
      }
    }

    // Insert second store first, then the first before the second.
    // Insert each one just before any overlapping non-constant stores.
    while (nst > 0) {
      Node* st1 = st[--nst];
      C->copy_node_notes_to(st1, old);
      st1 = phase->transform(st1);
      offset = off[nst];
      assert(offset >= header_size, "do not smash header");
      int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase);
      guarantee(ins_idx != 0, "must re-insert constant store");
      if (ins_idx < 0)  ins_idx = -ins_idx;  // never overlap
      if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem)
        set_req(--ins_idx, st1);
      else
        ins_req(ins_idx, st1);
    }
  }

  if (PrintCompilation && WizardMode)
    tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long",
                  old_subword, old_long, new_int, new_long);
  if (C->log() != NULL)
    C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'",
                   old_subword, old_long, new_int, new_long);

  // Clean up any remaining occurrences of zmem:
  remove_extra_zeroes();
}

// Explore forward from in(start) to find the first fully initialized
// word, and return its offset.  Skip groups of subword stores which
// together initialize full words.  If in(start) is itself part of a
// fully initialized word, return the offset of in(start).  If there
// are no following full-word stores, or if something is fishy, return
// a negative value.
intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
  int       int_map = 0;
  intptr_t  int_map_off = 0;
  const int FULL_MAP = right_n_bits(BytesPerInt);  // the int_map we hope for
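  // (FULL_MAP is 0xF: the map keeps one bit per byte of the current int.)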

  for (uint i = start, limit = req(); i < limit; i++) {
    Node* st = in(i);

    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  break;  // return conservative answer

    int st_size = st->as_Store()->memory_size();
    if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) {
      return st_off;            // we found a complete word init
    }

    // update the map:

    intptr_t this_int_off = align_size_down(st_off, BytesPerInt);
    if (this_int_off != int_map_off) {
      // reset the map:
      int_map = 0;
      int_map_off = this_int_off;
    }

    int subword_off = st_off - this_int_off;
    int_map |= right_n_bits(st_size) << subword_off;
    if ((int_map & FULL_MAP) == FULL_MAP) {
      return this_int_off;      // we found a complete word init
    }

    // Did this store hit or cross the word boundary?
    intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt);
    if (next_int_off == this_int_off + BytesPerInt) {
      // We passed the current int, without fully initializing it.
      int_map_off = next_int_off;
      int_map >>= BytesPerInt;
    } else if (next_int_off > this_int_off + BytesPerInt) {
      // We passed the current and next int.
      return this_int_off + BytesPerInt;
    }
  }

  return -1;
}


// Called when the associated AllocateNode is expanded into CFG.
// At this point, we may perform additional optimizations.
// Linearize the stores by ascending offset, to make memory
// activity as coherent as possible.
Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                                      intptr_t header_size,
                                      Node* size_in_bytes,
                                      PhaseGVN* phase) {
  assert(!is_complete(), "not already complete");
  assert(stores_are_sane(phase), "");
  assert(allocation() != NULL, "must be present");

  remove_extra_zeroes();

  if (ReduceFieldZeroing || ReduceBulkZeroing)
    // reduce instruction count for common initialization patterns
    coalesce_subword_stores(header_size, size_in_bytes, phase);

  Node* zmem = zero_memory();   // initially zero memory state
  Node* inits = zmem;           // accumulating a linearized chain of inits
  #ifdef ASSERT
  intptr_t first_offset = allocation()->minimum_header_size();
  intptr_t last_init_off = first_offset;  // previous init offset
  intptr_t last_init_end = first_offset;  // previous init offset+size
  intptr_t last_tile_end = first_offset;  // previous tile offset+size
  #endif
  intptr_t zeroes_done = header_size;

  bool do_zeroing = true;       // we might give up if inits are very sparse
  int  big_init_gaps = 0;       // how many large gaps have we seen?

  if (ZeroTLAB)  do_zeroing = false;
  if (!ReduceFieldZeroing && !ReduceBulkZeroing)  do_zeroing = false;

  for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)
      break;                    // unknown junk in the inits
    if (st->in(MemNode::Memory) != zmem)
      break;                    // complicated store chains somehow in list

    int st_size = st->as_Store()->memory_size();
    intptr_t next_init_off = st_off + st_size;

    if (do_zeroing && zeroes_done < next_init_off) {
      // See if this store needs a zero before it or under it.
      intptr_t zeroes_needed = st_off;

      if (st_size < BytesPerInt) {
        // Look for subword stores which only partially initialize words.
        // If we find some, we must lay down some word-level zeroes first,
        // underneath the subword stores.
        //
        // Examples:
        //   byte[] a = { p,q,r,s }  =>  a[0]=p,a[1]=q,a[2]=r,a[3]=s
        //   byte[] a = { x,y,0,0 }  =>  a[0..3] = 0, a[0]=x,a[1]=y
        //   byte[] a = { 0,0,z,0 }  =>  a[0..3] = 0, a[2]=z
        //
        // Note:  coalesce_subword_stores may have already done this,
        // if it was prompted by constant non-zero subword initializers.
        // But this case can still arise with non-constant stores.

        intptr_t next_full_store = find_next_fullword_store(i, phase);

        // In the examples above:
        //   in(i)          p   q   r   s     x   y     z
        //   st_off        12  13  14  15    12  13    14
        //   st_size        1   1   1   1     1   1     1
        //   next_full_s.  12  16  16  16    16  16    16
        //   z's_done      12  16  16  16    12  16    12
        //   z's_needed    12  16  16  16    16  16    16
        //   zsize          0   0   0   0     4   0     4
        if (next_full_store < 0) {
          // Conservative tack:  Zero to end of current word.
          zeroes_needed = align_size_up(zeroes_needed, BytesPerInt);
        } else {
          // Zero to beginning of next fully initialized word.
          // Or, don't zero at all, if we are already in that word.
          assert(next_full_store >= zeroes_needed, "must go forward");
          assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
          zeroes_needed = next_full_store;
        }
      }

      if (zeroes_needed > zeroes_done) {
        intptr_t zsize = zeroes_needed - zeroes_done;
        // Do some incremental zeroing on rawmem, in parallel with inits.
        zeroes_done = align_size_down(zeroes_done, BytesPerInt);
        rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                              zeroes_done, zeroes_needed,
                                              phase);
        zeroes_done = zeroes_needed;
        if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2)
          do_zeroing = false;   // leave the hole, next time
      }
    }

    // Collect the store and move on:
    st->set_req(MemNode::Memory, inits);
    inits = st;                 // put it on the linearized chain
    set_req(i, zmem);           // unhook from previous position

    if (zeroes_done == st_off)
      zeroes_done = next_init_off;

    assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");

    #ifdef ASSERT
    // Various order invariants.  Weaker than stores_are_sane because
    // a large constant tile can be filled in by smaller non-constant stores.
    assert(st_off >= last_init_off, "inits do not reverse");
    last_init_off = st_off;
    const Type* val = NULL;
    if (st_size >= BytesPerInt &&
        (val = phase->type(st->in(MemNode::ValueIn)))->singleton() &&
        (int)val->basic_type() < (int)T_OBJECT) {
      assert(st_off >= last_tile_end, "tiles do not overlap");
      assert(st_off >= last_init_end, "tiles do not overwrite inits");
      last_tile_end = MAX2(last_tile_end, next_init_off);
    } else {
      intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong);
      assert(st_tile_end >= last_tile_end, "inits stay with tiles");
      assert(st_off      >= last_init_end, "inits do not overlap");
      last_init_end = next_init_off;  // it's a non-tile
    }
    #endif //ASSERT
  }

  remove_extra_zeroes();        // clear out all the zmems left over
  add_req(inits);

  if (!ZeroTLAB) {
    // If anything remains to be zeroed, zero it all now.
    zeroes_done = align_size_down(zeroes_done, BytesPerInt);
    // if it is the last unused 4 bytes of an instance, forget about it
    intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
    if (zeroes_done + BytesPerLong >= size_limit) {
      AllocateNode* alloc = allocation();
      assert(alloc != NULL, "must be present");
      if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
        Node* klass_node = alloc->in(AllocateNode::KlassNode);
        ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
        if (zeroes_done == k->layout_helper())
          zeroes_done = size_limit;
      }
    }
    if (zeroes_done < size_limit) {
      rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
                                            zeroes_done, size_in_bytes, phase);
    }
  }

  set_complete(phase);
  return rawmem;
}


#ifdef ASSERT
bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
  if (is_complete())
    return true;                // stores could be anything at this point
  assert(allocation() != NULL, "must be present");
  intptr_t last_off = allocation()->minimum_header_size();
  for (uint i = InitializeNode::RawStores; i < req(); i++) {
    Node* st = in(i);
    intptr_t st_off = get_store_offset(st, phase);
    if (st_off < 0)  continue;  // ignore dead garbage
    if (last_off > st_off) {
      tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off);
      this->dump(2);
      assert(false, "ascending store offsets");
      return false;
    }
    last_off = st_off + st->as_Store()->memory_size();
  }
  return true;
}
#endif //ASSERT




//============================MergeMemNode=====================================
//
// SEMANTICS OF MEMORY MERGES:  A MergeMem is a memory state assembled from several
// contributing store or call operations.  Each contributor provides the memory
// state for a particular "alias type" (see Compile::alias_type).  For example,
// if a MergeMem has an input X for alias category #6, then any memory reference
// to alias category #6 may use X as its memory state input, as an exact equivalent
// to using the MergeMem as a whole.
//   Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p)
//
// (Here, the <N> notation gives the index of the relevant adr_type.)
//
// In one special case (and more cases in the future), alias categories overlap.
// The special alias category "Bot" (Compile::AliasIdxBot) includes all memory
// states.  Therefore, if a MergeMem has only one contributing input W for Bot,
// it is exactly equivalent to that state W:
//   MergeMem(<Bot>: W) <==> W
//
// Usually, the merge has more than one input.  In that case, where inputs
// overlap (i.e., one is Bot), the narrower alias type determines the memory
// state for that type, and the wider alias type (Bot) fills in everywhere else:
//   Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p)
//   Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p)
//
// A merge can take a "wide" memory state as one of its narrow inputs.
// This simply means that the merge observes only the relevant parts of
// the wide input.  That is, wide memory states arriving at narrow merge inputs
// are implicitly "filtered" or "sliced" as necessary.  (This is rare.)
//
// These rules imply that MergeMem nodes may cascade (via their <Bot> links),
// and that memory slices "leak through":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y)
//
// But, in such a cascade, repeated memory slices can "block the leak":
//   MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y')
//
// In the last example, Y is not part of the combined memory state of the
// outermost MergeMem.  The system must, of course, prevent unschedulable
// memory states from arising, so you can be sure that the state Y is somehow
// a precursor to state Y'.
//
//
// REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array
// of each MergeMemNode are exactly the numerical alias indexes, including
// but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw.  The functions
// Compile::alias_type (and kin) produce and manage these indexes.
//
// By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node.
// (Note that this provides quick access to the top node inside MergeMem methods,
// without the need to reach out via TLS to Compile::current.)
//
// As a consequence of what was just described, a MergeMem that represents a full
// memory state has an edge in(AliasIdxBot) which is a "wide" memory state,
// containing all alias categories.
//
// MergeMem nodes never (?) have control inputs, so in(0) is NULL.
//
// All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either
// a memory state for the alias type <N>, or else the top node, meaning that
// there is no particular input for that alias type.  Note that the length of
// a MergeMem is variable, and may be extended at any time to accommodate new
// memory states at larger alias indexes.  When merges grow, they are of course
// filled with "top" in the unused in() positions.
//
// This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable.
// (Top was chosen because it works smoothly with passes like GCM.)
//
// For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM.  (It is
// the type of random VM bits like TLS references.)  Since it is always the
// first non-Bot memory slice, some low-level loops use it to initialize an
// index variable:  for (i = AliasIdxRaw; i < req(); i++).
//
//
// ACCESSORS:  There is a special accessor MergeMemNode::base_memory which returns
// the distinguished "wide" state.  The accessor MergeMemNode::memory_at(N) returns
// the memory state for alias type <N> or, if there is no particular slice at
// <N>, the base memory.  To prevent bugs, memory_at does not accept <Top>
// or <Bot> indexes.  The iterator MergeMemStream provides robust iteration over
// MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited.
//
// %%%% We may get rid of base_memory as a separate accessor at some point; it isn't
// really that different from the other memory inputs.  An abbreviation called
// "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy.
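//
// A minimal usage sketch of these accessors (names as declared in this file;
// 'idx' stands for a hypothetical valid alias index from Compile::alias_type):
//   MergeMemNode* mm = MergeMemNode::make(C, mem);
//   Node* slice = mm->memory_at(idx);    // base_memory() if no slice at idx
//   mm->set_memory_at(idx, n);           // grows the input array if needed
//   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
//     // visits each non-top slice: mms.alias_idx(), mms.memory()
//   }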
//
//
// PARTIAL MEMORY STATES:  During optimization, MergeMem nodes may arise that represent
// partial memory states.  When a Phi splits through a MergeMem, the copy of the Phi
// that "emerges through" the base memory will be marked as excluding the alias types
// of the other (narrow-memory) copies which "emerged through" the narrow edges:
//
//   Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y))
//     ==Ideal=>  MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y))
//
// This strange "subtraction" effect is necessary to ensure IGVN convergence.
// (It is currently unimplemented.)  As you can see, the resulting merge is
// actually a disjoint union of memory states, rather than an overlay.
//

//------------------------------MergeMemNode-----------------------------------
Node* MergeMemNode::make_empty_memory() {
  Node* empty_memory = (Node*) Compile::current()->top();
  assert(empty_memory->is_top(), "correct sentinel identity");
  return empty_memory;
}

MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
  init_class_id(Class_MergeMem);
  // all inputs are nullified in Node::Node(int)
  // set_input(0, NULL);  // no control input

  // Initialize the edges uniformly to top, for starters.
  Node* empty_mem = make_empty_memory();
  for (uint i = Compile::AliasIdxTop; i < req(); i++) {
    init_req(i,empty_mem);
  }
  assert(empty_memory() == empty_mem, "");

  if( new_base != NULL && new_base->is_MergeMem() ) {
    MergeMemNode* mdef = new_base->as_MergeMem();
    assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
    for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
      mms.set_memory(mms.memory2());
    }
    assert(base_memory() == mdef->base_memory(), "");
  } else {
    set_base_memory(new_base);
  }
}

// Make a new, untransformed MergeMem with the same base as 'mem'.
// If mem is itself a MergeMem, populate the result with the same edges.
MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
  return new(C) MergeMemNode(mem);
}

//------------------------------cmp--------------------------------------------
uint MergeMemNode::hash() const { return NO_HASH; }
uint MergeMemNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Identity---------------------------------------
Node* MergeMemNode::Identity(PhaseTransform *phase) {
  // Identity if this merge point does not record any interesting memory
  // disambiguations.
  Node* base_mem = base_memory();
  Node* empty_mem = empty_memory();
  if (base_mem != empty_mem) {  // Memory path is not dead?
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      Node* mem = in(i);
      if (mem != empty_mem && mem != base_mem) {
        return this;            // Many memory splits; no change
      }
    }
  }
  return base_mem;              // No memory splits; ID on the one true input
}

//------------------------------Ideal------------------------------------------
// This method is invoked recursively on chains of MergeMem nodes
Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Remove chain'd MergeMems
  //
  // This is delicate, because each "in(i)" (i >= Raw) is interpreted
  // relative to the "in(Bot)".  Since we are patching both at the same time,
  // we have to be careful to read each "in(i)" relative to the old "in(Bot)",
  // but rewrite each "in(i)" relative to the new "in(Bot)".
  Node *progress = NULL;


  Node* old_base = base_memory();
  Node* empty_mem = empty_memory();
  if (old_base == empty_mem)
    return NULL; // Dead memory path.

  MergeMemNode* old_mbase;
  if (old_base != NULL && old_base->is_MergeMem())
    old_mbase = old_base->as_MergeMem();
  else
    old_mbase = NULL;
  Node* new_base = old_base;

  // simplify stacked MergeMems in base memory
  if (old_mbase)  new_base = old_mbase->base_memory();

  // the base memory might contribute new slices beyond my req()
  if (old_mbase)  grow_to_match(old_mbase);

  // Look carefully at the base node if it is a phi.
  PhiNode* phi_base;
  if (new_base != NULL && new_base->is_Phi())
    phi_base = new_base->as_Phi();
  else
    phi_base = NULL;

  Node*    phi_reg = NULL;
  uint     phi_len = (uint)-1;
  if (phi_base != NULL && !phi_base->is_copy()) {
    // do not examine phi if degraded to a copy
    phi_reg = phi_base->region();
    phi_len = phi_base->req();
    // see if the phi is unfinished
    for (uint i = 1; i < phi_len; i++) {
      if (phi_base->in(i) == NULL) {
        // incomplete phi; do not look at it yet!
        phi_reg = NULL;
        phi_len = (uint)-1;
        break;
      }
    }
  }

  // Note:  We do not call verify_sparse on entry, because inputs
  // can normalize to the base_memory via subsume_node or similar
  // mechanisms.  This method repairs that damage.

  assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels");

  // Look at each slice.
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    Node* old_in = in(i);
    // calculate the old memory value
    Node* old_mem = old_in;
    if (old_mem == empty_mem)  old_mem = old_base;
    assert(old_mem == memory_at(i), "");

    // maybe update (reslice) the old memory value

    // simplify stacked MergeMems
    Node* new_mem = old_mem;
    MergeMemNode* old_mmem;
    if (old_mem != NULL && old_mem->is_MergeMem())
      old_mmem = old_mem->as_MergeMem();
    else
      old_mmem = NULL;
    if (old_mmem == this) {
      // This can happen if loops break up and safepoints disappear.
      // A merge of BotPtr (default) with a RawPtr memory derived from a
      // safepoint can be rewritten to a merge of the same BotPtr with
      // the BotPtr phi coming into the loop.  If that phi disappears
      // also, we can end up with a self-loop of the mergemem.
      // In general, if loops degenerate and memory effects disappear,
      // a mergemem can be left looking at itself.  This simply means
      // that the mergemem's default should be used, since there is
      // no longer any apparent effect on this slice.
      // Note: If a memory slice is a MergeMem cycle, it is unreachable
      //       from start.  Update the input to TOP.
      new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base;
    }
    else if (old_mmem != NULL) {
      new_mem = old_mmem->memory_at(i);
    }
    // else preceding memory was not a MergeMem

    // replace equivalent phis (unfortunately, they do not GVN together)
    if (new_mem != NULL && new_mem != new_base &&
        new_mem->req() == phi_len && new_mem->in(0) == phi_reg) {
      if (new_mem->is_Phi()) {
        PhiNode* phi_mem = new_mem->as_Phi();
        for (uint i = 1; i < phi_len; i++) {
          if (phi_base->in(i) != phi_mem->in(i)) {
            phi_mem = NULL;
            break;
          }
        }
        if (phi_mem != NULL) {
          // equivalent phi nodes; revert to the def
          new_mem = new_base;
        }
      }
    }

    // maybe store down a new value
    Node* new_in = new_mem;
    if (new_in == new_base)  new_in = empty_mem;

    if (new_in != old_in) {
      // Warning:  Do not combine this "if" with the previous "if".
      // A memory slice might have to be rewritten even if it is semantically
      // unchanged, if the base_memory value has changed.
      set_req(i, new_in);
      progress = this;          // Report progress
    }
  }

  if (new_base != old_base) {
    set_req(Compile::AliasIdxBot, new_base);
    // Don't use set_base_memory(new_base), because we need to update du.
    assert(base_memory() == new_base, "");
    progress = this;
  }

  if( base_memory() == this ) {
    // a self cycle indicates this memory path is dead
    set_req(Compile::AliasIdxBot, empty_mem);
  }

  // Resolve external cycles by calling Ideal on a MergeMem base_memory
  // Recursion must occur after the self cycle check above
  if( base_memory()->is_MergeMem() ) {
    MergeMemNode *new_mbase = base_memory()->as_MergeMem();
    Node *m = phase->transform(new_mbase);  // Rollup any cycles
    if( m != NULL && (m->is_top() ||
        (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) {
      // propagate rollup of dead cycle to self
      set_req(Compile::AliasIdxBot, empty_mem);
    }
  }

  if( base_memory() == empty_mem ) {
    progress = this;
    // Cut inputs during Parse phase only.
    // During Optimize phase a dead MergeMem node will be subsumed by Top.
    if( !can_reshape ) {
      for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
        if( in(i) != empty_mem ) { set_req(i, empty_mem); }
      }
    }
  }

  if( !progress && base_memory()->is_Phi() && can_reshape ) {
    // Check if PhiNode::Ideal's "Split phis through memory merges"
    // transform should be attempted. Look for this->phi->this cycle.
    uint merge_width = req();
    if (merge_width > Compile::AliasIdxRaw) {
      PhiNode* phi = base_memory()->as_Phi();
      for( uint i = 1; i < phi->req(); ++i ) {// For all paths in
        if (phi->in(i) == this) {
          phase->is_IterGVN()->_worklist.push(phi);
          break;
        }
      }
    }
  }

  assert(progress || verify_sparse(), "please, no dups of base");
  return progress;
}

//-------------------------set_base_memory-------------------------------------
void MergeMemNode::set_base_memory(Node *new_base) {
  Node* empty_mem = empty_memory();
  set_req(Compile::AliasIdxBot, new_base);
  assert(memory_at(req()) == new_base, "must set default memory");
  // Clear out other occurrences of new_base:
  if (new_base != empty_mem) {
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      if (in(i) == new_base)  set_req(i, empty_mem);
    }
  }
}

//------------------------------out_RegMask------------------------------------
const RegMask &MergeMemNode::out_RegMask() const {
  return RegMask::Empty;
}

//------------------------------dump_spec--------------------------------------
#ifndef PRODUCT
void MergeMemNode::dump_spec(outputStream *st) const {
  st->print(" {");
  Node* base_mem = base_memory();
  for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
    Node* mem = memory_at(i);
    if (mem == base_mem) { st->print(" -"); continue; }
    st->print( " N%d:", mem->_idx );
    Compile::current()->get_adr_type(i)->dump_on(st);
  }
  st->print(" }");
}
#endif // !PRODUCT


#ifdef ASSERT
static bool might_be_same(Node* a, Node* b) {
  if (a == b)  return true;
  if (!(a->is_Phi() || b->is_Phi()))  return false;
  // phis shift around during optimization
  return true;  // pretty stupid...
}

// verify a narrow slice (either incoming or outgoing)
static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
  if (!VerifyAliases)       return;  // don't bother to verify unless requested
  if (is_error_reported())  return;  // muzzle asserts when debugging an error
  if (Node::in_dump())      return;  // muzzle asserts when printing
  assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel");
  assert(n != NULL, "");
  // Elide intervening MergeMem's
  while (n->is_MergeMem()) {
    n = n->as_MergeMem()->memory_at(alias_idx);
  }
  Compile* C = Compile::current();
  const TypePtr* n_adr_type = n->adr_type();
  if (n == m->empty_memory()) {
    // Implicit copy of base_memory()
  } else if (n_adr_type != TypePtr::BOTTOM) {
    assert(n_adr_type != NULL, "new memory must have a well-defined adr_type");
    assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice");
  } else {
    // A few places like make_runtime_call "know" that VM calls are narrow,
    // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM.
    bool expected_wide_mem = false;
    if (n == m->base_memory()) {
      expected_wide_mem = true;
    } else if (alias_idx == Compile::AliasIdxRaw ||
               n == m->memory_at(Compile::AliasIdxRaw)) {
      expected_wide_mem = true;
    } else if (!C->alias_type(alias_idx)->is_rewritable()) {
      // memory can "leak through" calls on channels that
      // are write-once.  Allow this also.
      expected_wide_mem = true;
    }
    assert(expected_wide_mem, "expected narrow slice replacement");
  }
}
#else // !ASSERT
#define verify_memory_slice(m,i,n) (void)(0)  // PRODUCT version is no-op
#endif


//-----------------------------memory_at---------------------------------------
Node* MergeMemNode::memory_at(uint alias_idx) const {
  assert(alias_idx >= Compile::AliasIdxRaw ||
         (alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
         "must avoid base_memory and AliasIdxTop");

  // Otherwise, it is a narrow slice.
  Node* n = alias_idx < req() ? in(alias_idx) : empty_memory();
  Compile *C = Compile::current();
  if (is_empty_memory(n)) {
    // the array is sparse; empty slots are the "top" node
    n = base_memory();
    assert(Node::in_dump()
           || n == NULL || n->bottom_type() == Type::TOP
           || n->adr_type() == NULL // address is TOP
           || n->adr_type() == TypePtr::BOTTOM
           || n->adr_type() == TypeRawPtr::BOTTOM
           || Compile::current()->AliasLevel() == 0,
           "must be a wide memory");
    // AliasLevel == 0 if we are organizing the memory states manually.
    // See verify_memory_slice for comments on TypeRawPtr::BOTTOM.
  } else {
    // make sure the stored slice is sane
    #ifdef ASSERT
    if (is_error_reported() || Node::in_dump()) {
    } else if (might_be_same(n, base_memory())) {
      // Give it a pass:  It is a mostly harmless repetition of the base.
      // This can arise normally from node subsumption during optimization.
    } else {
      verify_memory_slice(this, alias_idx, n);
    }
    #endif
  }
  return n;
}

//---------------------------set_memory_at-------------------------------------
void MergeMemNode::set_memory_at(uint alias_idx, Node *n) {
  verify_memory_slice(this, alias_idx, n);
  Node* empty_mem = empty_memory();
  if (n == base_memory())  n = empty_mem;  // collapse default
  uint need_req = alias_idx+1;
  if (req() < need_req) {
    if (n == empty_mem)  return;  // already the default, so do not grow me
    // grow the sparse array
    do {
      add_req(empty_mem);
    } while (req() < need_req);
  }
  set_req( alias_idx, n );
}



//--------------------------iteration_setup------------------------------------
void MergeMemNode::iteration_setup(const MergeMemNode* other) {
  if (other != NULL) {
    grow_to_match(other);
    // invariant:  the finite support of mm2 is within mm->req()
    #ifdef ASSERT
    for (uint i = req(); i < other->req(); i++) {
      assert(other->is_empty_memory(other->in(i)), "slice left uncovered");
    }
    #endif
  }
  // Replace spurious copies of base_memory by top.
  Node* base_mem = base_memory();
  if (base_mem != NULL && !base_mem->is_top()) {
    for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) {
      if (in(i) == base_mem)
        set_req(i, empty_memory());
    }
  }
}

//---------------------------grow_to_match-------------------------------------
void MergeMemNode::grow_to_match(const MergeMemNode* other) {
  Node* empty_mem = empty_memory();
  assert(other->is_empty_memory(empty_mem), "consistent sentinels");
  // look for the finite support of the other memory
  for (uint i = other->req(); --i >= req(); ) {
    if (other->in(i) != empty_mem) {
      uint new_len = i+1;
      while (req() < new_len)  add_req(empty_mem);
      break;
    }
  }
}

//---------------------------verify_sparse-------------------------------------
#ifndef PRODUCT
bool MergeMemNode::verify_sparse() const {
  assert(is_empty_memory(make_empty_memory()), "sane sentinel");
  Node* base_mem = base_memory();
  // The following can happen in degenerate cases, since empty==top.
  if (is_empty_memory(base_mem))  return true;
  for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
    assert(in(i) != NULL, "sane slice");
    if (in(i) == base_mem)  return false;  // should have been the sentinel value!
  }
  return true;
}

bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) {
  Node* n;
  n = mm->in(idx);
  if (mem == n)  return true;  // might be empty_memory()
  n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx);
  if (mem == n)  return true;
  while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) {
    if (mem == n)  return true;
    if (n == NULL)  break;
  }
  return false;
}
#endif // !PRODUCT
C:\hotspot-69087d08d473\src\share\vm/opto/memnode.hpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release        // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // can easily go unnoticed. With an enum, the compiler checks that
  // the type of the value matches the type of the parameter.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned one; otherwise
  // it wouldn't have been safe to create a non-pinned LoadNode with
  // those inputs in the first place. Conversely, if the graph already
  // has a pinned LoadNode and we add a non-pinned LoadNode with the
  // same inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode with the pinned one.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // loads that can be reordered, and such requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // The following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (This is an unfortunate consequence
  // of Safepoints not setting Raw Memory, itself an unfortunate consequence of
  // having Nodes which produce results (new raw memory state) inside of loops,
  // preventing all manner of other optimizations.)  Basically, it's ugly, but so
  // is the alternative.  See the comment in macro.cpp, around line 125, in
  // expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits, unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits, unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
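  // Putting the two together for an object-reference store (a sketch;
  // 'gvn', 'ctl', 'mem', 'adr', 'adr_type' and 'val' are illustrative):
  //   MemOrd mo = StoreNode::release_if_reference(T_OBJECT);  // == release
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type,
  //                                   val, T_OBJECT, mo);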

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
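  // A typical call zeroes from the end of the object header to the end of
  // the allocation (a sketch; 'ctl', 'mem', 'dest', 'header_size' and
  // 'size_in_bytes' are illustrative):
  //   Node* new_mem = ClearArrayNode::clear_memory(ctl, mem, dest,
  //                                                header_size,    // mod BytesPerInt
  //                                                size_in_bytes,  // mod BytesPerLong
  //                                                phase);
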
  // Return the allocation's input memory edge if it is a different instance,
  // or the node itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};


//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
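//
// As a rough sketch of the resulting shapes (illustrative, not a definitive
// trace of the parser): a volatile store compiles to
//   MemBarRelease; StoreX(..., mo=release); MemBarVolatile
// and a volatile load compiles to
//   LoadX(..., mo=acquire); MemBarAcquire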
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
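  // e.g., a narrow membar on the raw slice, forced to follow a prior store
  // (a sketch; 'prior_store' is illustrative):
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarVolatile,
  //                                     Compile::AliasIdxRaw, prior_store);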

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // zero-initialization of the new memory, then by any explicit initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
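  // The usual capture sequence, mirroring the caller in StoreNode::Ideal
  // (a sketch; 'init' and 'st' name an InitializeNode and a candidate store):
  //   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
  //   if (offset > 0) {   // 0 means the store cannot be captured
  //     Node* moved = init->capture_store(st, offset, phase, can_reshape);
  //     // On success, 'moved' writes the allocation's raw memory and the
  //     // original store can be replaced by its memory input.
  //   }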

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);
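  // e.g., to update a single slice of a wide memory state (a sketch;
  // 'base_mem', 'alias_idx' and 'new_slice' are illustrative):
  //   MergeMemNode* mm = MergeMemNode::make(C, base_mem);
  //   mm->set_memory_at(alias_idx, new_slice);
  //   Node* slice = mm->memory_at(alias_idx);  // yields new_slice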

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
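  //
  // A fuller sketch of the single-merge loop (illustrative; 'transform'
  // and 'new_slice' are placeholders, not real helpers):
  //   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
  //     Node* slice     = mms.memory();        // current non-empty slice
  //     Node* new_slice = transform(slice);    // compute a replacement
  //     mms.set_memory(new_slice);             // write it back through the stream
  //   }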

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
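
// A minimal usage sketch of the stream above (illustrative only;
// 'rewrite_slice' is a hypothetical per-slice transform, not a real helper
// in this file):
//
//   MergeMemNode* mm = ...;
//   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
//     Node* old_mem = mms.memory();           // current non-empty slice
//     Node* new_mem = rewrite_slice(old_mem); // hypothetical rewrite
//     if (new_mem != old_mem)
//       mms.set_memory(new_mem);   // writes through to mm, keeps stream in sync
//   }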

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/mulnode.cpp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/connode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"

// Portions of code courtesy of Clifford Click


//=============================================================================
//------------------------------hash-------------------------------------------
// Hash function over MulNodes.  Needs to be commutative; i.e., I swap
// (commute) inputs to MulNodes willy-nilly so the hash function must return
// the same value in the presence of edge swapping.
uint MulNode::hash() const {
  return (uintptr_t)in(1) + (uintptr_t)in(2) + Opcode();
}

//------------------------------Identity---------------------------------------
// Multiplying by one preserves the other argument
Node *MulNode::Identity( PhaseTransform *phase ) {
  register const Type *one = mul_id();  // The multiplicative identity
  if( phase->type( in(1) )->higher_equal( one ) ) return in(2);
  if( phase->type( in(2) )->higher_equal( one ) ) return in(1);

  return this;
}

//------------------------------Ideal------------------------------------------
// We also canonicalize the Node, moving constants to the right input,
// and flattening expressions (so that, e.g., (x*2)*3 becomes x*6).
Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  Node *progress = NULL;        // Progress flag
  // We are OK if the right input is a constant, or if the right input is a
  // load and the left input is neither a constant nor a load.
  if( !(t2->singleton() ||
        (in(2)->is_Load() && !(t1->singleton() || in(1)->is_Load())) ) ) {
    if( t1->singleton() ||       // Left input is a constant?
        // Otherwise, sort inputs (commutativity) to help value numbering.
        (in(1)->_idx > in(2)->_idx) ) {
      swap_edges(1, 2);
      const Type *t = t1;
      t1 = t2;
      t2 = t;
      progress = this;            // Made progress
    }
  }

  // If the right input is a constant, and the left input is a product of a
  // constant, flatten the expression tree.
  uint op = Opcode();
  if( t2->singleton() &&        // Right input is a constant?
      op != Op_MulF &&          // Float & double cannot reassociate
      op != Op_MulD ) {
    if( t2 == Type::TOP ) return NULL;
    Node *mul1 = in(1);
#ifdef ASSERT
    // Check for dead loop
    int   op1 = mul1->Opcode();
    if( phase->eqv( mul1, this ) || phase->eqv( in(2), this ) ||
        ( op1 == mul_opcode() || op1 == add_opcode() ) &&
        ( phase->eqv( mul1->in(1), this ) || phase->eqv( mul1->in(2), this ) ||
          phase->eqv( mul1->in(1), mul1 ) || phase->eqv( mul1->in(2), mul1 ) ) )
      assert(false, "dead loop in MulNode::Ideal");
#endif

    if( mul1->Opcode() == mul_opcode() ) {  // Left input is a multiply?
      // Mul of a constant?
      const Type *t12 = phase->type( mul1->in(2) );
      if( t12->singleton() && t12 != Type::TOP) { // Left input is a multiply of a constant?
        // Compute new constant; check for overflow
        const Type *tcon01 = ((MulNode*)mul1)->mul_ring(t2,t12);
        if( tcon01->singleton() ) {
          // The Mul of the flattened expression
          set_req(1, mul1->in(1));
          set_req(2, phase->makecon( tcon01 ));
          t2 = tcon01;
          progress = this;      // Made progress
        }
      }
    }
    // If the right input is a constant, and the left input is an add of a
    // constant, flatten the tree: (X+con1)*con0 ==> X*con0 + con1*con0
    const Node *add1 = in(1);
    if( add1->Opcode() == add_opcode() ) {      // Left input is an add?
      // Add of a constant?
      const Type *t12 = phase->type( add1->in(2) );
      if( t12->singleton() && t12 != Type::TOP ) { // Left input is an add of a constant?
        assert( add1->in(1) != add1, "dead loop in MulNode::Ideal" );
        // Compute new constant; check for overflow
        const Type *tcon01 = mul_ring(t2,t12);
        if( tcon01->singleton() ) {
          // Convert (X+con1)*con0 into X*con0
          Node *mul = clone();    // mul = ()*con0
          mul->set_req(1,add1->in(1));  // mul = X*con0
          mul = phase->transform(mul);

          Node *add2 = add1->clone();
          add2->set_req(1, mul);        // X*con0 + con0*con1
          add2->set_req(2, phase->makecon(tcon01) );
          progress = add2;
        }
      }
    } // End of is left input an add
  } // End of is right input a constant

  return progress;
}
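
// For example (illustrative), with integer inputs the two flattening rules
// above rewrite:
//   (x * 3) * 5  ==>  x * 15       // constants combined via mul_ring
//   (x + 7) * 5  ==>  x*5 + 35     // add pushed below the multiply
// Float and double multiplies are excluded because floating-point
// reassociation can change the rounded result.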

//------------------------------Value-----------------------------------------
const Type *MulNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Either input is ZERO ==> the result is ZERO.
  // Not valid for floats or doubles since +0.0 * -0.0 --> +0.0
  int op = Opcode();
  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
    const Type *zero = add_id();        // The multiplicative zero
    if( t1->higher_equal( zero ) ) return zero;
    if( t2->higher_equal( zero ) ) return zero;
  }

  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
    return bottom_type();

#if defined(IA32)
  // Can't trust native compilers to properly fold strict double
  // multiplication with round-to-zero on this platform.
  if (op == Op_MulD && phase->C->method()->is_strict()) {
    return TypeD::DOUBLE;
  }
#endif

  return mul_ring(t1,t2);            // Local flavor of type multiplication
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// Check for power-of-2 multiply, then try the regular MulNode::Ideal
Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Swap constant to right
  jint con;
  if ((con = in(1)->find_int_con(0)) != 0) {
    swap_edges(1, 2);
    // Finish rest of method to use info in 'con'
  } else if ((con = in(2)->find_int_con(0)) == 0) {
    return MulNode::Ideal(phase, can_reshape);
  }

  // Now we have a constant Node on the right and the constant in con
  if (con == 0) return NULL;   // Multiply by zero is handled by the Value call
  if (con == 1) return NULL;   // Multiply by one  is handled by the Identity call

  // Check for negative constant; if so negate the final result
  bool sign_flip = false;

  unsigned int abs_con = uabs(con);
  if (abs_con != (unsigned int)con) {
    sign_flip = true;
  }

  // Get low bit; check for being the only bit
  Node *res = NULL;
  unsigned int bit1 = abs_con & (0-abs_con);       // Extract low bit
  if (bit1 == abs_con) {           // Found a power of 2?
    res = new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit1)));
  } else {

    // Check for constant with 2 bits set
    unsigned int bit2 = abs_con-bit1;
    bit2 = bit2 & (0-bit2);          // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {    // Found all bits in con?
      Node *n1 = phase->transform( new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit1))));
      Node *n2 = phase->transform( new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(bit2))));
      res = new (phase->C) AddINode(n2, n1);

    } else if (is_power_of_2(abs_con+1)) {
      // Sleazy: power-of-2 minus 1.  Next time be generic.
      unsigned int temp = abs_con + 1;
      Node *n1 = phase->transform(new (phase->C) LShiftINode(in(1), phase->intcon(log2_uint(temp))));
      res = new (phase->C) SubINode(n1, in(1));
    } else {
      return MulNode::Ideal(phase, can_reshape);
    }
  }

  if (sign_flip) {             // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new (phase->C) SubINode(phase->intcon(0),res);
  }

  return res;                   // Return final result
}
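
// Worked examples (illustrative) of the strength reduction above:
//   x * 8   ==>  x << 3                  // single bit set
//   x * 10  ==>  (x << 3) + (x << 1)     // two bits set (0b1010)
//   x * 7   ==>  (x << 3) - x            // abs_con+1 is a power of 2
//   x * -4  ==>  0 - (x << 2)            // negative constant: shift, then negate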

//------------------------------mul_ring---------------------------------------
// Compute the product type of two integer ranges into this node.
const Type *MulINode::mul_ring(const Type *t0, const Type *t1) const {
  const TypeInt *r0 = t0->is_int(); // Handy access
  const TypeInt *r1 = t1->is_int();

  // Fetch endpoints of all ranges
  int32 lo0 = r0->_lo;
  double a = (double)lo0;
  int32 hi0 = r0->_hi;
  double b = (double)hi0;
  int32 lo1 = r1->_lo;
  double c = (double)lo1;
  int32 hi1 = r1->_hi;
  double d = (double)hi1;

  // Compute all endpoints & check for overflow
  int32 A = java_multiply(lo0, lo1);
  if( (double)A != a*c ) return TypeInt::INT; // Overflow?
  int32 B = java_multiply(lo0, hi1);
  if( (double)B != a*d ) return TypeInt::INT; // Overflow?
  int32 C = java_multiply(hi0, lo1);
  if( (double)C != b*c ) return TypeInt::INT; // Overflow?
  int32 D = java_multiply(hi0, hi1);
  if( (double)D != b*d ) return TypeInt::INT; // Overflow?

  if( A < B ) { lo0 = A; hi0 = B; } // Sort range endpoints
  else { lo0 = B; hi0 = A; }
  if( C < D ) {
    if( C < lo0 ) lo0 = C;
    if( D > hi0 ) hi0 = D;
  } else {
    if( D < lo0 ) lo0 = D;
    if( C > hi0 ) hi0 = C;
  }
  return TypeInt::make(lo0, hi0, MAX2(r0->_widen,r1->_widen));
}
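
// Example (illustrative): for ranges [2,3] * [-4,5] the four cross products
// are {-8, 10, -12, 15}, so the result type is [-12,15].  If any 32-bit
// product disagrees with its double-precision value, the multiply overflowed
// somewhere in the range and the conservative answer is TypeInt::INT.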


//=============================================================================
//------------------------------Ideal------------------------------------------
// Check for power-of-2 multiply, then try the regular MulNode::Ideal
Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Swap constant to right
  jlong con;
  if ((con = in(1)->find_long_con(0)) != 0) {
    swap_edges(1, 2);
    // Finish rest of method to use info in 'con'
  } else if ((con = in(2)->find_long_con(0)) == 0) {
    return MulNode::Ideal(phase, can_reshape);
  }

  // Now we have a constant Node on the right and the constant in con
  if (con == CONST64(0)) return NULL;  // Multiply by zero is handled by the Value call
  if (con == CONST64(1)) return NULL;  // Multiply by one  is handled by the Identity call

  // Check for negative constant; if so negate the final result
  bool sign_flip = false;
  julong abs_con = uabs(con);
  if (abs_con != (julong)con) {
    sign_flip = true;
  }

  // Get low bit; check for being the only bit
  Node *res = NULL;
  julong bit1 = abs_con & (0-abs_con);      // Extract low bit
  if (bit1 == abs_con) {           // Found a power of 2?
    res = new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit1)));
  } else {

    // Check for constant with 2 bits set
    julong bit2 = abs_con-bit1;
    bit2 = bit2 & (0-bit2);          // Extract 2nd bit
    if (bit2 + bit1 == abs_con) {    // Found all bits in con?
      Node *n1 = phase->transform(new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit1))));
      Node *n2 = phase->transform(new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(bit2))));
      res = new (phase->C) AddLNode(n2, n1);

    } else if (is_power_of_2_long(abs_con+1)) {
      // Sleazy: power-of-2 minus 1.  Next time be generic.
      julong temp = abs_con + 1;
      Node *n1 = phase->transform( new (phase->C) LShiftLNode(in(1), phase->intcon(log2_long(temp))));
      res = new (phase->C) SubLNode(n1, in(1));
    } else {
      return MulNode::Ideal(phase, can_reshape);
    }
  }

  if (sign_flip) {             // Need to negate result?
    res = phase->transform(res);// Transform, before making the zero con
    res = new (phase->C) SubLNode(phase->longcon(0),res);
  }

  return res;                   // Return final result
}

//------------------------------mul_ring---------------------------------------
// Compute the product type of two integer ranges into this node.
const Type *MulLNode::mul_ring(const Type *t0, const Type *t1) const {
  const TypeLong *r0 = t0->is_long(); // Handy access
  const TypeLong *r1 = t1->is_long();

  // Fetch endpoints of all ranges
  jlong lo0 = r0->_lo;
  double a = (double)lo0;
  jlong hi0 = r0->_hi;
  double b = (double)hi0;
  jlong lo1 = r1->_lo;
  double c = (double)lo1;
  jlong hi1 = r1->_hi;
  double d = (double)hi1;

  // Compute all endpoints & check for overflow
  jlong A = java_multiply(lo0, lo1);
  if( (double)A != a*c ) return TypeLong::LONG; // Overflow?
  jlong B = java_multiply(lo0, hi1);
  if( (double)B != a*d ) return TypeLong::LONG; // Overflow?
  jlong C = java_multiply(hi0, lo1);
  if( (double)C != b*c ) return TypeLong::LONG; // Overflow?
  jlong D = java_multiply(hi0, hi1);
  if( (double)D != b*d ) return TypeLong::LONG; // Overflow?

  if( A < B ) { lo0 = A; hi0 = B; } // Sort range endpoints
  else { lo0 = B; hi0 = A; }
  if( C < D ) {
    if( C < lo0 ) lo0 = C;
    if( D > hi0 ) hi0 = D;
  } else {
    if( D < lo0 ) lo0 = D;
    if( C > hi0 ) hi0 = C;
  }
  return TypeLong::make(lo0, hi0, MAX2(r0->_widen,r1->_widen));
}

//=============================================================================
//------------------------------mul_ring---------------------------------------
// Compute the product type of two float ranges into this node.
const Type *MulFNode::mul_ring(const Type *t0, const Type *t1) const {
  if( t0 == Type::FLOAT || t1 == Type::FLOAT ) return Type::FLOAT;
  return TypeF::make( t0->getf() * t1->getf() );
}

//=============================================================================
//------------------------------mul_ring---------------------------------------
// Compute the product type of two double ranges into this node.
const Type *MulDNode::mul_ring(const Type *t0, const Type *t1) const {
  if( t0 == Type::DOUBLE || t1 == Type::DOUBLE ) return Type::DOUBLE;
  // We must be multiplying 2 double constants.
  return TypeD::make( t0->getd() * t1->getd() );
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type *MulHiLNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Either input is BOTTOM ==> the result is the local BOTTOM
  const Type *bot = bottom_type();
  if( (t1 == bot) || (t2 == bot) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
    return bot;

  // It is not worth trying to constant fold this stuff!
  return TypeLong::LONG;
}

//=============================================================================
//------------------------------mul_ring---------------------------------------
// Supplied function returns the product of the inputs IN THE CURRENT RING.
// For the logical operations the ring's MUL is really a logical AND function.
// This also type-checks the inputs for sanity.  Guaranteed never to
// be passed a TOP or BOTTOM type; these are filtered out by the pre-check.
const Type *AndINode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeInt *r0 = t0->is_int(); // Handy access
  const TypeInt *r1 = t1->is_int();
  int widen = MAX2(r0->_widen,r1->_widen);

  // If either input is a constant, might be able to trim cases
  if( !r0->is_con() && !r1->is_con() )
    return TypeInt::INT;        // No constants to be had

  // Both constants?  Return bits
  if( r0->is_con() && r1->is_con() )
    return TypeInt::make( r0->get_con() & r1->get_con() );

  if( r0->is_con() && r0->get_con() > 0 )
    return TypeInt::make(0, r0->get_con(), widen);

  if( r1->is_con() && r1->get_con() > 0 )
    return TypeInt::make(0, r1->get_con(), widen);

  if( r0 == TypeInt::BOOL || r1 == TypeInt::BOOL ) {
    return TypeInt::BOOL;
  }

  return TypeInt::INT;          // No constants to be had
}
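
// Example (illustrative): x & 0xFF has type [0,255] for any int x, and a
// fully constant input such as 0x6 & 0x3 folds to the constant 2.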

//------------------------------Identity---------------------------------------
// Masking off the high bits of an unsigned load is not required
Node *AndINode::Identity( PhaseTransform *phase ) {

  // x & x => x
  if (phase->eqv(in(1), in(2))) return in(1);

  Node* in1 = in(1);
  uint op = in1->Opcode();
  const TypeInt* t2 = phase->type(in(2))->isa_int();
  if (t2 && t2->is_con()) {
    int con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeInt* t1 = phase->type( in(1) )->isa_int();
    if (t1 != NULL && t1->_lo >= 0) {
      jint t1_support = right_n_bits(1 + log2_jint(t1->_hi));
      if ((t1_support & con) == t1_support)
        return in1;
    }
    // Masking off the high bits of an unsigned-shift-right is not
    // needed either.
    if (op == Op_URShiftI) {
      const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
      if (t12 && t12->is_con()) {  // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
        int mask = max_juint >> shift;
        if ((mask & con) == mask)  // If AND is useless, skip it
          return in1;
      }
    }
  }
  return MulNode::Identity(phase);
}
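
// Examples (illustrative) of the identities above: if x is known to lie in
// [0,100], then x & 0x7F is just x (the 7-bit support covers every bit x can
// set), and (x >>> 24) & 0xFF is just x >>> 24 (the shift already cleared
// the high 24 bits).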

//------------------------------Ideal------------------------------------------
Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Special case constant AND mask
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const int mask = t2->get_con();
  Node *load = in(1);
  uint lop = load->Opcode();

  // Masking bits off of a Character?  Hi bits are already zero.
  if( lop == Op_LoadUS &&
      (mask & 0xFFFF0000) )     // Can we make a smaller mask?
    return new (phase->C) AndINode(load,phase->intcon(mask&0xFFFF));

  // Masking bits off of a Short?  Loading a Character does some masking
  if (can_reshape &&
      load->outcnt() == 1 && load->unique_out() == this) {
    if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
      Node *ldus = new (phase->C) LoadUSNode(load->in(MemNode::Control),
                                             load->in(MemNode::Memory),
                                             load->in(MemNode::Address),
                                             load->adr_type(),
                                             TypeInt::CHAR, MemNode::unordered);
      ldus = phase->transform(ldus);
      return new (phase->C) AndINode(ldus, phase->intcon(mask & 0xFFFF));
    }

    // Masking sign bits off of a Byte?  Do an unsigned byte load plus
    // an and.
    if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
      Node* ldub = new (phase->C) LoadUBNode(load->in(MemNode::Control),
                                             load->in(MemNode::Memory),
                                             load->in(MemNode::Address),
                                             load->adr_type(),
                                             TypeInt::UBYTE, MemNode::unordered);
      ldub = phase->transform(ldub);
      return new (phase->C) AndINode(ldub, phase->intcon(mask));
    }
  }

  // Masking off sign bits?  Don't make them!
  if( lop == Op_RShiftI ) {
    const TypeInt *t12 = phase->type(load->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
      const int sign_bits_mask = ~right_n_bits(BitsPerJavaInteger - shift);
      // If the AND of the two masks has no bits set, then only the original
      // shifted bits survive; no sign-extension bits survive the masking.
      if( (sign_bits_mask & mask) == 0 ) {
        // Use zero-fill shift instead
        Node *zshift = phase->transform(new (phase->C) URShiftINode(load->in(1),load->in(2)));
        return new (phase->C) AndINode( zshift, in(2) );
      }
    }
  }

  // Check for 'negate/and-1', a pattern emitted when someone asks for
  // 'mod 2'.  Negate leaves the low order bit unchanged (think: complement
  // plus 1) and the mask is of the low order bit.  Skip the negate.
  if( lop == Op_SubI && mask == 1 && load->in(1) &&
      phase->type(load->in(1)) == TypeInt::ZERO )
    return new (phase->C) AndINode( load->in(2), in(2) );

  return MulNode::Ideal(phase, can_reshape);
}
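
// Illustrative rewrites performed above:
//   LoadUS & 0x3FFFF   ==>  LoadUS & 0xFFFF    // char high bits already zero
//   LoadS  & 0xFF      ==>  LoadUS & 0xFF      // zero-extending load instead
//   (x >> 8) & 0xFF    ==>  (x >>> 8) & 0xFF   // sign bits masked off anyway
//   (0 - x) & 1        ==>  x & 1              // negate preserves the low bit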

//=============================================================================
//------------------------------mul_ring---------------------------------------
// Supplied function returns the product of the inputs IN THE CURRENT RING.
// For the logical operations the ring's MUL is really a logical AND function.
// This also type-checks the inputs for sanity.  Guaranteed never to
// be passed a TOP or BOTTOM type; these are filtered out by the pre-check.
const Type *AndLNode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeLong *r0 = t0->is_long(); // Handy access
  const TypeLong *r1 = t1->is_long();
  int widen = MAX2(r0->_widen,r1->_widen);

  // If either input is a constant, might be able to trim cases
  if( !r0->is_con() && !r1->is_con() )
    return TypeLong::LONG;      // No constants to be had

  // Both constants?  Return bits
  if( r0->is_con() && r1->is_con() )
    return TypeLong::make( r0->get_con() & r1->get_con() );

  if( r0->is_con() && r0->get_con() > 0 )
    return TypeLong::make(CONST64(0), r0->get_con(), widen);

  if( r1->is_con() && r1->get_con() > 0 )
    return TypeLong::make(CONST64(0), r1->get_con(), widen);

  return TypeLong::LONG;        // No constants to be had
}

//------------------------------Identity---------------------------------------
// Masking off the high bits of an unsigned load is not required
Node *AndLNode::Identity( PhaseTransform *phase ) {

  // x & x => x
  if (phase->eqv(in(1), in(2))) return in(1);

  Node *usr = in(1);
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( t2 && t2->is_con() ) {
    jlong con = t2->get_con();
    // Masking off high bits which are always zero is useless.
    const TypeLong* t1 = phase->type( in(1) )->isa_long();
    if (t1 != NULL && t1->_lo >= 0) {
      int bit_count = log2_long(t1->_hi) + 1;
      jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count));
      if ((t1_support & con) == t1_support)
        return usr;
    }
    uint lop = usr->Opcode();
    // Masking off the high bits of an unsigned-shift-right is not
    // needed either.
    if( lop == Op_URShiftL ) {
      const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
      if( t12 && t12->is_con() ) {  // Shift is by a constant
        int shift = t12->get_con();
        shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
        jlong mask = max_julong >> shift;
        if( (mask&con) == mask )  // If AND is useless, skip it
          return usr;
      }
    }
  }
  return MulNode::Identity(phase);
}

//------------------------------Ideal------------------------------------------
Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Special case constant AND mask
  const TypeLong *t2 = phase->type( in(2) )->isa_long();
  if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
  const jlong mask = t2->get_con();

  Node* in1 = in(1);
  uint op = in1->Opcode();

  // Are we masking a long that was converted from an int with a mask
  // that fits in 32-bits?  Commute them and use an AndINode.  Don't
  // convert masks which would cause a sign extension of the integer
  // value.  This check includes UI2L masks (0x00000000FFFFFFFF) which
  // would be optimized away later in Identity.
  if (op == Op_ConvI2L && (mask & CONST64(0xFFFFFFFF80000000)) == 0) {
    Node* andi = new (phase->C) AndINode(in1->in(1), phase->intcon(mask));
    andi = phase->transform(andi);
    return new (phase->C) ConvI2LNode(andi);
  }

  // Masking off sign bits?  Don't make them!
  if (op == Op_RShiftL) {
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
      const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1);
      // If the AND of the two masks has no bits set, then only the original
      // shifted bits survive; no sign-extension bits survive the masking.
      if( (sign_bits_mask & mask) == 0 ) {
        // Use zero-fill shift instead
        Node *zshift = phase->transform(new (phase->C) URShiftLNode(in1->in(1), in1->in(2)));
        return new (phase->C) AndLNode(zshift, in(2));
      }
    }
  }

  return MulNode::Ideal(phase, can_reshape);
}
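
// Example (illustrative): ((long)i) & 0xFFL  ==>  (long)(i & 0xFF).
// The mask fits in the positive int range, so the AND can be done in 32 bits
// with the cheap ConvI2L kept on the outside.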

//=============================================================================
//------------------------------Identity---------------------------------------
Node *LShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int();  // shift count is an int
  return ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerInt - 1 ) ) == 0 ) ? in(1) : this;
}

//------------------------------Ideal------------------------------------------
// If the right input is a constant, and the left input is an add of a
// constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type *t  = phase->type( in(2) );
  if( t == Type::TOP ) return NULL;       // Right input is dead
  const TypeInt *t2 = t->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerInt - 1 );  // masked shift count

  if ( con == 0 )  return NULL; // let Identity() handle 0 shift count

  // Left input is an add of a constant?
  Node *add1 = in(1);
  int add1_op = add1->Opcode();
  if( add1_op == Op_AddI ) {    // Left input is an add?
    assert( add1 != add1->in(1), "dead loop in LShiftINode::Ideal" );
    const TypeInt *t12 = phase->type(add1->in(2))->isa_int();
    if( t12 && t12->is_con() ){ // Left input is an add of a con?
      // Transform is legal, but check for profit.  Avoid breaking 'i2s'
      // and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
      if( con < 16 ) {
        // Compute X << con0
        Node *lsh = phase->transform( new (phase->C) LShiftINode( add1->in(1), in(2) ) );
        // Compute X<<con0 + (con1<<con0)
        return new (phase->C) AddINode( lsh, phase->intcon(t12->get_con() << con));
      }
    }
  }

  // Check for "(x>>c0)<<c0" which just masks off low bits
  if( (add1_op == Op_RShiftI || add1_op == Op_URShiftI ) &&
      add1->in(2) == in(2) )
    // Convert to "(x & -(1<<c0))"
    return new (phase->C) AndINode(add1->in(1),phase->intcon( -(1<<con)));

  // Check for "((x>>c0) & Y)<<c0" which just masks off more low bits
  if( add1_op == Op_AndI ) {
    Node *add2 = add1->in(1);
    int add2_op = add2->Opcode();
    if( (add2_op == Op_RShiftI || add2_op == Op_URShiftI ) &&
        add2->in(2) == in(2) ) {
      // Convert to "(x & (Y<<c0))"
      Node *y_sh = phase->transform( new (phase->C) LShiftINode( add1->in(2), in(2) ) );
      return new (phase->C) AndINode( add2->in(1), y_sh );
    }
  }

  // Check for ((x & ((1<<(32-c0))-1)) << c0) which ANDs off high bits
  // before shifting them away.
  const jint bits_mask = right_n_bits(BitsPerJavaInteger-con);
  if( add1_op == Op_AndI &&
      phase->type(add1->in(2)) == TypeInt::make( bits_mask ) )
    return new (phase->C) LShiftINode( add1->in(1), in(2) );

  return NULL;
}
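
// Illustrative rewrites performed above:
//   (x + 3) << 2          ==>  (x << 2) + 12    // shift count < 16 only
//   (x >> 4) << 4         ==>  x & -16          // i.e. x & -(1<<4)
//   ((x >>> 4) & y) << 4  ==>  x & (y << 4)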

//------------------------------Value------------------------------------------
// A LShiftINode shifts its input1 left by the input2 amount.
const Type *LShiftINode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if( (t1 == TypeInt::INT) || (t2 == TypeInt::INT) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
    return TypeInt::INT;

  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access

  if (!r2->is_con())
    return TypeInt::INT;

  uint shift = r2->get_con();
  shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
  // Shift by a multiple of 32 does nothing:
  if (shift == 0)  return t1;

  // If the shift is a constant, shift the bounds of the type,
  // unless this could lead to an overflow.
  if (!r1->is_con()) {
    jint lo = r1->_lo, hi = r1->_hi;
    if (((lo << shift) >> shift) == lo &&
        ((hi << shift) >> shift) == hi) {
      // No overflow.  The range shifts up cleanly.
      return TypeInt::make((jint)lo << (jint)shift,
                           (jint)hi << (jint)shift,
                           MAX2(r1->_widen,r2->_widen));
    }
    return TypeInt::INT;
  }

  return TypeInt::make( (jint)r1->get_con() << (jint)shift );
}
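
// Example (illustrative): for x in [3,5], x << 2 has type [12,20].  For
// x in [0,max_jint], x << 1 fails the round-trip check (the top bit is
// lost), so the result widens to TypeInt::INT.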

//=============================================================================
//------------------------------Identity---------------------------------------
Node *LShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int(); // shift count is an int
  return ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerLong - 1 ) ) == 0 ) ? in(1) : this;
}

//------------------------------Ideal------------------------------------------
// If the right input is a constant, and the left input is an add of a
// constant, flatten the tree: (X+con1)<<con0 ==> X<<con0 + con1<<con0
Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const Type *t  = phase->type( in(2) );
  if( t == Type::TOP ) return NULL;       // Right input is dead
  const TypeInt *t2 = t->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerLong - 1 );  // masked shift count

  if ( con == 0 ) return NULL;  // let Identity() handle 0 shift count

  // Left input is an add of a constant?
  Node *add1 = in(1);
  int add1_op = add1->Opcode();
  if( add1_op == Op_AddL ) {    // Left input is an add?
    // Avoid dead data cycles from dead loops
    assert( add1 != add1->in(1), "dead loop in LShiftLNode::Ideal" );
    const TypeLong *t12 = phase->type(add1->in(2))->isa_long();
    if( t12 && t12->is_con() ){ // Left input is an add of a con?
      // Compute X << con0
      Node *lsh = phase->transform( new (phase->C) LShiftLNode( add1->in(1), in(2) ) );
      // Compute X<<con0 + (con1<<con0)
      return new (phase->C) AddLNode( lsh, phase->longcon(t12->get_con() << con));
    }
  }

  // Check for "(x>>c0)<<c0" which just masks off low bits
  if( (add1_op == Op_RShiftL || add1_op == Op_URShiftL ) &&
      add1->in(2) == in(2) )
    // Convert to "(x & -(1<<c0))"
    return new (phase->C) AndLNode(add1->in(1),phase->longcon( -(CONST64(1)<<con)));

  // Check for "((x>>c0) & Y)<<c0" which just masks off more low bits
  if( add1_op == Op_AndL ) {
    Node *add2 = add1->in(1);
    int add2_op = add2->Opcode();
    if( (add2_op == Op_RShiftL || add2_op == Op_URShiftL ) &&
        add2->in(2) == in(2) ) {
      // Convert to "(x & (Y<<c0))"
      Node *y_sh = phase->transform( new (phase->C) LShiftLNode( add1->in(2), in(2) ) );
      return new (phase->C) AndLNode( add2->in(1), y_sh );
    }
  }

  // Check for ((x & ((CONST64(1)<<(64-c0))-1)) << c0) which ANDs off high bits
  // before shifting them away.
  const jlong bits_mask = jlong(max_julong >> con);
  if( add1_op == Op_AndL &&
      phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
    return new (phase->C) LShiftLNode( add1->in(1), in(2) );

  return NULL;
}

//------------------------------Value------------------------------------------
// A LShiftLNode shifts its input1 left by the input2 amount.
const Type *LShiftLNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if( (t1 == TypeLong::LONG) || (t2 == TypeInt::INT) ||
      (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
    return TypeLong::LONG;

  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt  *r2 = t2->is_int();  // Handy access

  if (!r2->is_con())
    return TypeLong::LONG;

  uint shift = r2->get_con();
  shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
  // Shift by a multiple of 64 does nothing:
  if (shift == 0)  return t1;

  // If the shift is a constant, shift the bounds of the type,
  // unless this could lead to an overflow.
  if (!r1->is_con()) {
    jlong lo = r1->_lo, hi = r1->_hi;
    if (((lo << shift) >> shift) == lo &&
        ((hi << shift) >> shift) == hi) {
      // No overflow.  The range shifts up cleanly.
      return TypeLong::make((jlong)lo << (jint)shift,
                            (jlong)hi << (jint)shift,
                            MAX2(r1->_widen,r2->_widen));
    }
    return TypeLong::LONG;
  }

  return TypeLong::make( (jlong)r1->get_con() << (jint)shift );
}

//=============================================================================
//------------------------------Identity---------------------------------------
Node *RShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *t2 = phase->type(in(2))->isa_int();
  if( !t2 ) return this;
  if ( t2->is_con() && ( t2->get_con() & ( BitsPerInt - 1 ) ) == 0 )
    return in(1);

  // Check for useless sign-masking
  if( in(1)->Opcode() == Op_LShiftI &&
      in(1)->req() == 3 &&
      in(1)->in(2) == in(2) &&
      t2->is_con() ) {
    uint shift = t2->get_con();
    shift &= BitsPerJavaInteger-1; // semantics of Java shifts
    // Compute masks for which this shifting doesn't change
    int lo = (-1 << (BitsPerJavaInteger - shift-1)); // FFFF8000
    int hi = ~lo;               // 00007FFF
    const TypeInt *t11 = phase->type(in(1)->in(1))->isa_int();
    if( !t11 ) return this;
    // Does actual value fit inside of mask?
    if( lo <= t11->_lo && t11->_hi <= hi )
      return in(1)->in(1);      // Then shifting is a nop
  }

  return this;
}

//------------------------------Ideal------------------------------------------
Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Inputs may be TOP if they are dead.
  const TypeInt *t1 = phase->type( in(1) )->isa_int();
  if( !t1 ) return NULL;        // Left input must be an integer
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const TypeInt *t3;  // type of in(1).in(2)
  int shift = t2->get_con();
  shift &= BitsPerJavaInteger-1;  // semantics of Java shifts

  if ( shift == 0 ) return NULL;  // let Identity() handle 0 shift count

  // Check for (x & 0xFF000000) >> 24, whose mask can be made smaller.
  // Such expressions arise normally from shift chains like (byte)(x >> 24).
  const Node *mask = in(1);
  if( mask->Opcode() == Op_AndI &&
      (t3 = phase->type(mask->in(2))->isa_int()) &&
      t3->is_con() ) {
    Node *x = mask->in(1);
    jint maskbits = t3->get_con();
    // Convert to "(x >> shift) & (mask >> shift)"
    Node *shr_nomask = phase->transform( new (phase->C) RShiftINode(mask->in(1), in(2)) );
    return new (phase->C) AndINode(shr_nomask, phase->intcon( maskbits >> shift));
  }

  // Check for "(short[i] <<16)>>16" which simply sign-extends
  const Node *shl = in(1);
  if( shl->Opcode() != Op_LShiftI ) return NULL;

  if( shift == 16 &&
      (t3 = phase->type(shl->in(2))->isa_int()) &&
      t3->is_con(16) ) {
    Node *ld = shl->in(1);
    if( ld->Opcode() == Op_LoadS ) {
      // Sign extension is just useless here.  Return a RShiftI of zero instead
      // returning 'ld' directly.  We cannot return an old Node directly as
      // that is the job of 'Identity' calls and Identity calls only work on
      // direct inputs ('ld' is an extra Node removed from 'this').  The
      // combined optimization requires Identity only return direct inputs.
      set_req(1, ld);
      set_req(2, phase->intcon(0));
      return this;
    }
    else if( can_reshape &&
             ld->Opcode() == Op_LoadUS &&
             ld->outcnt() == 1 && ld->unique_out() == shl)
      // Replace zero-extension-load with sign-extension-load
      return new (phase->C) LoadSNode( ld->in(MemNode::Control),
                                       ld->in(MemNode::Memory),
                                       ld->in(MemNode::Address),
                                       ld->adr_type(), TypeInt::SHORT,
                                       MemNode::unordered);
  }

  // Check for "(byte[i] <<24)>>24" which simply sign-extends
  if( shift == 24 &&
      (t3 = phase->type(shl->in(2))->isa_int()) &&
      t3->is_con(24) ) {
    Node *ld = shl->in(1);
    if( ld->Opcode() == Op_LoadB ) {
      // Sign extension is just useless here
      set_req(1, ld);
      set_req(2, phase->intcon(0));
      return this;
    }
  }

  return NULL;
}
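
// Illustrative rewrites performed above:
//   (x & 0xFF00) >> 8     ==>  (x >> 8) & 0xFF  // mask shifts down with x
//   (LoadS << 16) >> 16   ==>  LoadS >> 0       // load already sign-extends
//   (LoadUS << 16) >> 16  ==>  LoadS            // reload with sign extension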

//------------------------------Value------------------------------------------
// A RShiftINode shifts its input1 right by the input2 amount.
const Type *RShiftINode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeInt::INT;

  if (t2 == TypeInt::INT)
    return TypeInt::INT;

  const TypeInt *r1 = t1->is_int(); // Handy access
  const TypeInt *r2 = t2->is_int(); // Handy access

  // If the shift is a constant, just shift the bounds of the type.
  // For example, if the shift is 31, we just propagate sign bits.
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
    // Shift by a multiple of 32 does nothing:
    if (shift == 0)  return t1;
    // Calculate reasonably aggressive bounds for the result.
    // This is necessary if we are to correctly type things
    // like (x<<24>>24) == ((byte)x).
    jint lo = (jint)r1->_lo >> (jint)shift;
    jint hi = (jint)r1->_hi >> (jint)shift;
    assert(lo <= hi, "must have valid bounds");
    const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
#ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaInteger-1) {
      if (r1->_lo >= 0) assert(ti == TypeInt::ZERO,    ">>31 of + is  0");
      if (r1->_hi <  0) assert(ti == TypeInt::MINUS_1, ">>31 of - is -1");
    }
#endif
    return ti;
  }

  if( !r1->is_con() || !r2->is_con() )
    return TypeInt::INT;

  // Signed shift right
  return TypeInt::make( r1->get_con() >> (r2->get_con()&31) );
}
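
// Example (illustrative): for x in [-5,99], x >> 31 has bounds
// [-5 >> 31, 99 >> 31] = [-1,0]; a 31-bit shift propagates only sign bits.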

//=============================================================================
//------------------------------Identity---------------------------------------
Node *RShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int(); // shift count is an int
  return ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerLong - 1 ) ) == 0 ) ? in(1) : this;
}

//------------------------------Value------------------------------------------
// A RShiftLNode shifts its input1 right by the input2 amount.
const Type *RShiftLNode::Value( PhaseTransform *phase ) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeLong::LONG;

  if (t2 == TypeInt::INT)
    return TypeLong::LONG;

  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt  *r2 = t2->is_int (); // Handy access

  // If the shift is a constant, just shift the bounds of the type.
  // For example, if the shift is 63, we just propagate sign bits.
  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= (2*BitsPerJavaInteger)-1;  // semantics of Java shifts
    // Shift by a multiple of 64 does nothing:
    if (shift == 0)  return t1;
    // Calculate reasonably aggressive bounds for the result.
    // This is necessary if we are to correctly type things
    // like (x<<24>>24) == ((byte)x).
    jlong lo = (jlong)r1->_lo >> (jlong)shift;
    jlong hi = (jlong)r1->_hi >> (jlong)shift;
    assert(lo <= hi, "must have valid bounds");
    const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
    #ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == (2*BitsPerJavaInteger)-1) {
      if (r1->_lo >= 0) assert(tl == TypeLong::ZERO,    ">>63 of + is 0");
      if (r1->_hi < 0)  assert(tl == TypeLong::MINUS_1, ">>63 of - is -1");
    }
    #endif
    return tl;
  }

  return TypeLong::LONG;                // Give up
}

//=============================================================================
//------------------------------Identity---------------------------------------
Node *URShiftINode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int();
  if ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerInt - 1 ) ) == 0 ) return in(1);

  // Check for "((x << LogBytesPerWord) + (wordSize-1)) >> LogBytesPerWord" which is just "x".
  // Happens during new-array length computation.
  // Safe if 'x' is in the range [0..(max_int>>LogBytesPerWord)]
  Node *add = in(1);
  if( add->Opcode() == Op_AddI ) {
    const TypeInt *t2  = phase->type(add->in(2))->isa_int();
    if( t2 && t2->is_con(wordSize - 1) &&
        add->in(1)->Opcode() == Op_LShiftI ) {
      // Check that shift_counts are LogBytesPerWord
      Node          *lshift_count   = add->in(1)->in(2);
      const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
      if( t_lshift_count && t_lshift_count->is_con(LogBytesPerWord) &&
          t_lshift_count == phase->type(in(2)) ) {
        Node          *x   = add->in(1)->in(1);
        const TypeInt *t_x = phase->type(x)->isa_int();
        if( t_x != NULL && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord) ) {
          return x;
        }
      }
    }
  }

  return (phase->type(in(2))->higher_equal(TypeInt::ZERO)) ? in(1) : this;
}

//------------------------------Ideal------------------------------------------
Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & 31; // Shift count is always masked
  if ( con == 0 ) return NULL;  // let Identity() handle a 0 shift count
  // We'll be wanting the right-shift amount as a mask of that many bits
  const int mask = right_n_bits(BitsPerJavaInteger - con);

  int in1_op = in(1)->Opcode();

  // Check for ((x>>>a)>>>b) and replace with (x>>>(a+b)) when a+b < 32
  if( in1_op == Op_URShiftI ) {
    const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
    if( t12 && t12->is_con() ) { // Right input is a constant
      assert( in(1) != in(1)->in(1), "dead loop in URShiftINode::Ideal" );
      const int con2 = t12->get_con() & 31; // Shift count is always masked
      const int con3 = con+con2;
      if( con3 < 32 )           // Only merge shifts if total is < 32
        return new (phase->C) URShiftINode( in(1)->in(1), phase->intcon(con3) );
    }
  }

  // Check for ((x << z) + Y) >>> z.  Replace with (x + (Y>>>z)) & z-mask.
  // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
  // If Q is "X << z" the rounding is useless.  Look for patterns like
  // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
  Node *add = in(1);
  if( in1_op == Op_AddI ) {
    Node *lshl = add->in(1);
    if( lshl->Opcode() == Op_LShiftI &&
        phase->type(lshl->in(2)) == t2 ) {
      Node *y_z = phase->transform( new (phase->C) URShiftINode(add->in(2),in(2)) );
      Node *sum = phase->transform( new (phase->C) AddINode( lshl->in(1), y_z ) );
      return new (phase->C) AndINode( sum, phase->intcon(mask) );
    }
  }

  // Check for (x & mask) >>> z.  Replace with (x >>> z) & (mask >>> z)
  // This shortens the mask.  Also, if we are extracting a high byte and
  // storing it to a buffer, the mask will be removed completely.
  Node *andi = in(1);
  if( in1_op == Op_AndI ) {
    const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jint mask2 = t3->get_con();
      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new (phase->C) URShiftINode(andi->in(1), in(2)) );
      return new (phase->C) AndINode(newshr, phase->intcon(mask2));
      // The negative values are easier to materialize than positive ones.
      // A typical case from address arithmetic is ((x & ~15) >> 4).
      // It's better to change that to ((x >> 4) & ~0) versus
      // ((x >> 4) & 0x0FFFFFFF).  The difference is greatest in LP64.
    }
  }

  // Check for "(X << z ) >>> z" which simply zero-extends
  Node *shl = in(1);
  if( in1_op == Op_LShiftI &&
      phase->type(shl->in(2)) == t2 )
    return new (phase->C) AndINode( shl->in(1), phase->intcon(mask) );

  return NULL;
}
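
// Illustrative rewrites performed above:
//   (x >>> 5) >>> 6        ==>  x >>> 11                   // 11 < 32, so merge
//   ((x << 3) + y) >>> 3   ==>  (x + (y >>> 3)) & (2^29-1) // rounding idiom gone
//   (x & 0xFF00) >>> 8     ==>  (x >>> 8) & 0xFF
//   (x << 7) >>> 7         ==>  x & (2^25-1)               // plain zero-extension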

//------------------------------Value------------------------------------------
// A URShiftINode shifts its input1 right by the input2 amount.
const Type *URShiftINode::Value( PhaseTransform *phase ) const {
  // (This is a near clone of RShiftINode::Value.)
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeInt::INT;

  if (t2 == TypeInt::INT)
    return TypeInt::INT;

  const TypeInt *r1 = t1->is_int();     // Handy access
  const TypeInt *r2 = t2->is_int();     // Handy access

  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaInteger-1;  // semantics of Java shifts
    // Shift by a multiple of 32 does nothing:
    if (shift == 0)  return t1;
    // Calculate reasonably aggressive bounds for the result.
    jint lo = (juint)r1->_lo >> (juint)shift;
    jint hi = (juint)r1->_hi >> (juint)shift;
    if (r1->_hi >= 0 && r1->_lo < 0) {
      // If the type has both negative and positive values,
      // there are two separate sub-domains to worry about:
      // The positive half and the negative half.
      jint neg_lo = lo;
      jint neg_hi = (juint)-1 >> (juint)shift;
      jint pos_lo = (juint) 0 >> (juint)shift;
      jint pos_hi = hi;
      lo = MIN2(neg_lo, pos_lo);  // == 0
      hi = MAX2(neg_hi, pos_hi);  // == -1 >>> shift;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeInt* ti = TypeInt::make(lo, hi, MAX2(r1->_widen,r2->_widen));
    #ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaInteger-1) {
      if (r1->_lo >= 0) assert(ti == TypeInt::ZERO, ">>>31 of + is 0");
      if (r1->_hi < 0)  assert(ti == TypeInt::ONE,  ">>>31 of - is +1");
    }
    #endif
    return ti;
  }

  //
  // Do not support shifted oops in info for GC
  //
  // else if( t1->base() == Type::InstPtr ) {
  //
  //   const TypeInstPtr *o = t1->is_instptr();
  //   if( t1->singleton() )
  //     return TypeInt::make( ((uint32)o->const_oop() + o->_offset) >> shift );
  // }
  // else if( t1->base() == Type::KlassPtr ) {
  //   const TypeKlassPtr *o = t1->is_klassptr();
  //   if( t1->singleton() )
  //     return TypeInt::make( ((uint32)o->const_oop() + o->_offset) >> shift );
  // }

  return TypeInt::INT;
}
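
// Example (illustrative): for x in [-10,20], x >>> 28 is 0 at smallest (from
// the non-negative half) and 0xF == -1 >>> 28 at largest (from the negative
// half), so the result type is [0,15].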

//=============================================================================
//------------------------------Identity---------------------------------------
Node *URShiftLNode::Identity( PhaseTransform *phase ) {
  const TypeInt *ti = phase->type( in(2) )->isa_int(); // shift count is an int
  return ( ti && ti->is_con() && ( ti->get_con() & ( BitsPerLong - 1 ) ) == 0 ) ? in(1) : this;
}

//------------------------------Ideal------------------------------------------
Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  const TypeInt *t2 = phase->type( in(2) )->isa_int();
  if( !t2 || !t2->is_con() ) return NULL; // Right input must be a constant
  const int con = t2->get_con() & ( BitsPerLong - 1 ); // Shift count is always masked
  if ( con == 0 ) return NULL;  // let Identity() handle a 0 shift count
                                // note: mask computation below does not work for 0 shift count
  // We'll be wanting the right-shift amount as a mask of that many bits
  const jlong mask = jlong(max_julong >> con);

  // Check for ((x << z) + Y) >>> z.  Replace with (x + (Y>>>z)) & z-mask.
  // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
  // If Q is "X << z" the rounding is useless.  Look for patterns like
  // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
  Node *add = in(1);
  if( add->Opcode() == Op_AddL ) {
    Node *lshl = add->in(1);
    if( lshl->Opcode() == Op_LShiftL &&
        phase->type(lshl->in(2)) == t2 ) {
      Node *y_z = phase->transform( new (phase->C) URShiftLNode(add->in(2),in(2)) );
      Node *sum = phase->transform( new (phase->C) AddLNode( lshl->in(1), y_z ) );
      return new (phase->C) AndLNode( sum, phase->longcon(mask) );
    }
  }

  // Check for (x & mask) >>> z.  Replace with (x >>> z) & (mask >>> z)
  // This shortens the mask.  Also, if we are extracting a high byte and
  // storing it to a buffer, the mask will be removed completely.
  Node *andi = in(1);
  if( andi->Opcode() == Op_AndL ) {
    const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
    if( t3 && t3->is_con() ) { // Right input is a constant
      jlong mask2 = t3->get_con();
      mask2 >>= con;  // *signed* shift downward (high-order zeroes do not help)
      Node *newshr = phase->transform( new (phase->C) URShiftLNode(andi->in(1), in(2)) );
      return new (phase->C) AndLNode(newshr, phase->longcon(mask2));
    }
  }

  // Check for "(X << z ) >>> z" which simply zero-extends
  Node *shl = in(1);
  if( shl->Opcode() == Op_LShiftL &&
      phase->type(shl->in(2)) == t2 )
    return new (phase->C) AndLNode( shl->in(1), phase->longcon(mask) );

  return NULL;
}

//------------------------------Value------------------------------------------
// A URShiftLNode shifts its input1 right by the input2 amount.
const Type *URShiftLNode::Value( PhaseTransform *phase ) const {
  // (This is a near clone of RShiftLNode::Value.)
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Left input is ZERO ==> the result is ZERO.
  if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
  // Shift by zero does nothing
  if( t2 == TypeInt::ZERO ) return t1;

  // Either input is BOTTOM ==> the result is BOTTOM
  if (t1 == Type::BOTTOM || t2 == Type::BOTTOM)
    return TypeLong::LONG;

  if (t2 == TypeInt::INT)
    return TypeLong::LONG;

  const TypeLong *r1 = t1->is_long(); // Handy access
  const TypeInt  *r2 = t2->is_int (); // Handy access

  if (r2->is_con()) {
    uint shift = r2->get_con();
    shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
    // Shift by a multiple of 64 does nothing:
    if (shift == 0)  return t1;
    // Calculate reasonably aggressive bounds for the result.
    jlong lo = (julong)r1->_lo >> (juint)shift;
    jlong hi = (julong)r1->_hi >> (juint)shift;
    if (r1->_hi >= 0 && r1->_lo < 0) {
      // If the type has both negative and positive values,
      // there are two separate sub-domains to worry about:
      // The positive half and the negative half.
      jlong neg_lo = lo;
      jlong neg_hi = (julong)-1 >> (juint)shift;
      jlong pos_lo = (julong) 0 >> (juint)shift;
      jlong pos_hi = hi;
      //lo = MIN2(neg_lo, pos_lo);  // == 0
      lo = neg_lo < pos_lo ? neg_lo : pos_lo;
      //hi = MAX2(neg_hi, pos_hi);  // == -1 >>> shift;
      hi = neg_hi > pos_hi ? neg_hi : pos_hi;
    }
    assert(lo <= hi, "must have valid bounds");
    const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
    #ifdef ASSERT
    // Make sure we get the sign-capture idiom correct.
    if (shift == BitsPerJavaLong - 1) {
      if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0");
      if (r1->_hi < 0)  assert(tl == TypeLong::ONE,  ">>>63 of - is +1");
    }
    #endif
    return tl;
  }

  return TypeLong::LONG;                // Give up
}
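
// Illustrative sketch (not part of HotSpot): when the input range straddles
// zero, shifting the two endpoints directly would produce an inverted
// "interval" (the negative endpoint becomes a huge unsigned value), which is
// why the code above splits the range into its negative and positive halves
// and ends up with [0, -1 >>> shift].  The guarded check below replays that
// in plain C++; URSHIFT_VALUE_DEMO and urshift_value_demo are hypothetical.
#ifdef URSHIFT_VALUE_DEMO
#include <cstdint>
#include <cassert>
static void urshift_value_demo() {
  const unsigned shift = 8;
  // Naive endpoint shifting inverts the interval for a range like [-100,100]:
  const uint64_t naive_lo = uint64_t(int64_t(-100)) >> shift;  // huge
  const uint64_t naive_hi = uint64_t(int64_t( 100)) >> shift;  // zero
  assert(naive_lo > naive_hi);                // not a usable [lo,hi] pair
  // The split-range bounds [0, -1 >>> shift] do cover every shifted value:
  const uint64_t lo = 0, hi = ~uint64_t(0) >> shift;
  for (int64_t v = -100; v <= 100; ++v) {
    const uint64_t s = uint64_t(v) >> shift;
    assert(s >= lo && s <= hi);
  }
}
#endif // URSHIFT_VALUE_DEMO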
C:\hotspot-69087d08d473\src\share\vm/opto/mulnode.hpp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MULNODE_HPP
#define SHARE_VM_OPTO_MULNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class PhaseTransform;

//------------------------------MulNode----------------------------------------
// Classic MULTIPLY functionality.  This covers all the usual 'multiply'
// behaviors for an algebraic ring.  Multiply-integer, multiply-float,
// multiply-double, and binary-and are all inherited from this class.  The
// various identity values are supplied by virtual functions.
class MulNode : public Node {
  virtual uint hash() const;
public:
  MulNode( Node *in1, Node *in2 ): Node(0,in1,in2) {
    init_class_id(Class_Mul);
  }

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for "add of zero" as an identity.
  virtual Node *Identity( PhaseTransform *phase );

  // We also canonicalize the Node, moving constants to the right input,
  // and flatten expressions (so that 1+x+2 becomes x+3).
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Supplied function returns the product of the inputs.
  // This also type-checks the inputs for sanity.  Guaranteed never to
  // be passed a TOP or BOTTOM type, these are filtered out by a pre-check.
  // This call recognizes the multiplicative zero type.
  virtual const Type *mul_ring( const Type *, const Type * ) const = 0;

  // Supplied function to return the multiplicative identity type
  virtual const Type *mul_id() const = 0;

  // Supplied function to return the additive identity type
  virtual const Type *add_id() const = 0;

  // Supplied function to return the additive opcode
  virtual int add_opcode() const = 0;

  // Supplied function to return the multiplicative opcode
  virtual int mul_opcode() const = 0;

};

//------------------------------MulINode---------------------------------------
// Multiply 2 integers
class MulINode : public MulNode {
public:
  MulINode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeInt::ONE; }
  const Type *add_id() const { return TypeInt::ZERO; }
  int add_opcode() const { return Op_AddI; }
  int mul_opcode() const { return Op_MulI; }
  const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------MulLNode---------------------------------------
// Multiply 2 longs
class MulLNode : public MulNode {
public:
  MulLNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeLong::ONE; }
  const Type *add_id() const { return TypeLong::ZERO; }
  int add_opcode() const { return Op_AddL; }
  int mul_opcode() const { return Op_MulL; }
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};


//------------------------------MulFNode---------------------------------------
// Multiply 2 floats
class MulFNode : public MulNode {
public:
  MulFNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeF::ONE; }
  const Type *add_id() const { return TypeF::ZERO; }
  int add_opcode() const { return Op_AddF; }
  int mul_opcode() const { return Op_MulF; }
  const Type *bottom_type() const { return Type::FLOAT; }
  virtual uint ideal_reg() const { return Op_RegF; }
};

//------------------------------MulDNode---------------------------------------
// Multiply 2 doubles
class MulDNode : public MulNode {
public:
  MulDNode( Node *in1, Node *in2 ) : MulNode(in1,in2) {}
  virtual int Opcode() const;
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeD::ONE; }
  const Type *add_id() const { return TypeD::ZERO; }
  int add_opcode() const { return Op_AddD; }
  int mul_opcode() const { return Op_MulD; }
  const Type *bottom_type() const { return Type::DOUBLE; }
  virtual uint ideal_reg() const { return Op_RegD; }
};

//-------------------------------MulHiLNode------------------------------------
// Upper 64 bits of a 64 bit by 64 bit multiply
class MulHiLNode : public Node {
public:
  MulHiLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------AndINode---------------------------------------
// Logically AND 2 integers.  Included with the MUL nodes because it inherits
// all the behavior of multiplication on a ring.
class AndINode : public MulINode {
public:
  AndINode( Node *in1, Node *in2 ) : MulINode(in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeInt::MINUS_1; }
  const Type *add_id() const { return TypeInt::ZERO; }
  int add_opcode() const { return Op_OrI; }
  int mul_opcode() const { return Op_AndI; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------AndLNode---------------------------------------
// Logically AND 2 longs.  Included with the MUL nodes because it inherits
// all the behavior of multiplication on a ring.
class AndLNode : public MulLNode {
public:
  AndLNode( Node *in1, Node *in2 ) : MulLNode(in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  const Type *mul_id() const { return TypeLong::MINUS_1; }
  const Type *add_id() const { return TypeLong::ZERO; }
  int add_opcode() const { return Op_OrL; }
  int mul_opcode() const { return Op_AndL; }
  virtual uint ideal_reg() const { return Op_RegL; }
};
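
// Illustrative sketch (not part of HotSpot): the And nodes can reuse the
// MulNode machinery because '&' plays the role of '*' and '|' the role of
// '+': all-ones is the multiplicative identity (mul_id), zero the additive
// identity (add_id), and '&' distributes over '|'.  AND_RING_DEMO and the
// function name are hypothetical.
#ifdef AND_RING_DEMO
#include <cassert>
static void and_ring_demo(int a, int b, int c) {
  assert((a & -1) == a);                         // mul_id: all ones
  assert((a | 0) == a);                          // add_id: zero
  assert((a & (b | c)) == ((a & b) | (a & c)));  // '&' distributes over '|'
}
#endif // AND_RING_DEMO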

//------------------------------LShiftINode------------------------------------
// Logical shift left
class LShiftINode : public Node {
public:
  LShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------LShiftLNode------------------------------------
// Logical shift left
class LShiftLNode : public Node {
public:
  LShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};

//------------------------------RShiftINode------------------------------------
// Signed shift right
class RShiftINode : public Node {
public:
  RShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------RShiftLNode------------------------------------
// Signed shift right
class RShiftLNode : public Node {
public:
  RShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};


//------------------------------URShiftINode-----------------------------------
// Logical shift right
class URShiftINode : public Node {
public:
  URShiftINode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
};

//------------------------------URShiftLNode-----------------------------------
// Logical shift right
class URShiftLNode : public Node {
public:
  URShiftLNode( Node *in1, Node *in2 ) : Node(0,in1,in2) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  const Type *bottom_type() const { return TypeLong::LONG; }
  virtual uint ideal_reg() const { return Op_RegL; }
};

#endif // SHARE_VM_OPTO_MULNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/multnode.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"

//=============================================================================
//------------------------------MultiNode--------------------------------------
const RegMask &MultiNode::out_RegMask() const {
  return RegMask::Empty;
}

Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); }

//------------------------------proj_out---------------------------------------
// Get a named projection
ProjNode* MultiNode::proj_out(uint which_proj) const {
  assert(Opcode() != Op_If || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
  assert(Opcode() != Op_If || outcnt() == 2, "bad if #1");
  for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
    Node *p = fast_out(i);
    if (p->is_Proj()) {
      ProjNode *proj = p->as_Proj();
      if (proj->_con == which_proj) {
        assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
        return proj;
      }
    } else {
      assert(p == this && this->is_Start(), "else must be proj");
      continue;
    }
  }
  return NULL;
}

//=============================================================================
//------------------------------ProjNode---------------------------------------
uint ProjNode::hash() const {
  // only one input
  return (uintptr_t)in(TypeFunc::Control) + (_con << 1) + (_is_io_use ? 1 : 0);
}
uint ProjNode::cmp( const Node &n ) const { return _con == ((ProjNode&)n)._con && ((ProjNode&)n)._is_io_use == _is_io_use; }
uint ProjNode::size_of() const { return sizeof(ProjNode); }

// Test if we propagate interesting control along this projection
bool ProjNode::is_CFG() const {
  Node *def = in(0);
  return (_con == TypeFunc::Control && def->is_CFG());
}

const Type* ProjNode::proj_type(const Type* t) const {
  if (t == Type::TOP) {
    return Type::TOP;
  }
  if (t == Type::BOTTOM) {
    return Type::BOTTOM;
  }
  t = t->is_tuple()->field_at(_con);
  Node* n = in(0);
  if ((_con == TypeFunc::Parms) &&
      n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
    // The result of autoboxing is always non-null on normal path.
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}

const Type *ProjNode::bottom_type() const {
  if (in(0) == NULL) return Type::TOP;
  return proj_type(in(0)->bottom_type());
}

const TypePtr *ProjNode::adr_type() const {
  if (bottom_type() == Type::MEMORY) {
    // in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM
    const TypePtr* adr_type = in(0)->adr_type();
    #ifdef ASSERT
    if (!is_error_reported() && !Node::in_dump())
      assert(adr_type != NULL, "source must have adr_type");
    #endif
    return adr_type;
  }
  assert(bottom_type()->base() != Type::Memory, "no other memories?");
  return NULL;
}

bool ProjNode::pinned() const { return in(0)->pinned(); }
#ifndef PRODUCT
void ProjNode::dump_spec(outputStream *st) const { st->print("#%d",_con); if(_is_io_use) st->print(" (i_o_use)");}
#endif

//----------------------------check_con----------------------------------------
void ProjNode::check_con() const {
  Node* n = in(0);
  if (n == NULL)       return;  // should be assert, but NodeHash makes bogons
  if (n->is_Mach())    return;  // mach. projs. are not type-safe
  if (n->is_Start())   return;  // alas, starts can have mach. projs. also
  if (_con == SCMemProjNode::SCMEMPROJCON ) return;
  const Type* t = n->bottom_type();
  if (t == Type::TOP)  return;  // multi is dead
  assert(_con < t->is_tuple()->cnt(), "ProjNode::_con must be in range");
}

//------------------------------Value------------------------------------------
const Type *ProjNode::Value( PhaseTransform *phase ) const {
  if (in(0) == NULL) return Type::TOP;
  return proj_type(phase->type(in(0)));
}

//------------------------------out_RegMask------------------------------------
// Pass the buck uphill
const RegMask &ProjNode::out_RegMask() const {
  return RegMask::Empty;
}

//------------------------------ideal_reg--------------------------------------
uint ProjNode::ideal_reg() const {
  return bottom_type()->ideal_reg();
}

//-------------------------------is_uncommon_trap_proj----------------------------
// Return true if proj is the form of "proj->[region->..]call_uct"
bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
  int path_limit = 10;
  Node* out = this;
  for (int ct = 0; ct < path_limit; ct++) {
    out = out->unique_ctrl_out();
    if (out == NULL)
      return false;
    if (out->is_CallStaticJava()) {
      int req = out->as_CallStaticJava()->uncommon_trap_request();
      if (req != 0) {
        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
        if (trap_reason == reason || reason == Deoptimization::Reason_none) {
           return true;
        }
      }
      return false; // don't do further after call
    }
    if (out->Opcode() != Op_Region)
      return false;
  }
  return false;
}

//-------------------------------is_uncommon_trap_if_pattern-------------------------
// Return true  for "if(test)-> proj -> ...
//                          |
//                          V
//                      other_proj->[region->..]call_uct"
//
// "must_reason_predicate" means the uct reason must be Reason_predicate
bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
  Node *in0 = in(0);
  if (!in0->is_If()) return false;
  // Variation of a dead If node.
  if (in0->outcnt() < 2)  return false;
  IfNode* iff = in0->as_If();

  // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
  if (reason != Deoptimization::Reason_none) {
    if (iff->in(1)->Opcode() != Op_Conv2B ||
       iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
      return false;
    }
  }

  ProjNode* other_proj = iff->proj_out(1-_con);
  if (other_proj == NULL) // Should never happen, but make Parfait happy.
      return false;
  if (other_proj->is_uncommon_trap_proj(reason)) {
    assert(reason == Deoptimization::Reason_none ||
           Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
    return true;
  }
  return false;
}
C:\hotspot-69087d08d473\src\share\vm/opto/multnode.hpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MULTNODE_HPP
#define SHARE_VM_OPTO_MULTNODE_HPP

#include "opto/node.hpp"

class Matcher;
class ProjNode;

//------------------------------MultiNode--------------------------------------
// This class defines a MultiNode, a Node which produces many values.  The
// values are wrapped up in a tuple Type, i.e. a TypeTuple.
class MultiNode : public Node {
public:
  MultiNode( uint required ) : Node(required) {
    init_class_id(Class_Multi);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const = 0;
  virtual bool       is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual const RegMask &out_RegMask() const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  ProjNode* proj_out(uint which_proj) const; // Get a named projection

};

//------------------------------ProjNode---------------------------------------
// This class defines a Projection node.  Projections project a single element
// out of a tuple (or Signature) type.  Only MultiNodes produce TypeTuple
// results.
class ProjNode : public Node {
protected:
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const;
  void check_con() const;       // Called from constructor.
  const Type* proj_type(const Type* t) const;

public:
  ProjNode( Node *src, uint con, bool io_use = false )
    : Node( src ), _con(con), _is_io_use(io_use)
  {
    init_class_id(Class_Proj);
    // Optimistic setting. Need additional checks in Node::is_dead_loop_safe().
    if (con != TypeFunc::Memory || src->is_Start())
      init_flags(Flag_is_dead_loop_safe);
    debug_only(check_con());
  }
  const uint _con;              // The field in the tuple we are projecting
  const bool _is_io_use;        // Used to distinguish between the projections
                                // used on the control and io paths from a macro node
  virtual int Opcode() const;
  virtual bool      is_CFG() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const;
  virtual bool pinned() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const;
  virtual const RegMask &out_RegMask() const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  // Return true if proj is for "proj->[region->..]call_uct"
  bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
  // Return true for    "if(test)-> proj -> ...
  //                          |
  //                          V
  //                      other_proj->[region->..]call_uct"
  bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
};

#endif // SHARE_VM_OPTO_MULTNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/node.cpp
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/type.hpp"
#include "utilities/copy.hpp"

class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Arena we are currently building Nodes in
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;
#endif

#ifdef ASSERT

//--------------------------verify_construction--------------------------------
// Set a breakpoint here to identify where a particular node index is built.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0)  bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}
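
// Illustrative sketch (not part of HotSpot): the bump arithmetic above picks
// the smallest new_debug_idx greater than old_debug_idx whose last five
// decimal digits match those of _idx, so a node's index stays visible inside
// its debug index.  DEBUG_IDX_DEMO and debug_idx_demo are hypothetical; the
// demo assumes non-negative indices.
#ifdef DEBUG_IDX_DEMO
#include <cassert>
static int debug_idx_demo(int old_debug_idx, int idx) {
  const int mod = 100000;
  int new_debug_idx = old_debug_idx + 1;
  int bump = (idx - new_debug_idx) % mod;   // may be negative in C++
  if (bump < 0) bump += mod;                // normalize into [0, mod)
  new_debug_idx += bump;
  assert(new_debug_idx % mod == idx % mod); // low five digits line up
  return new_debug_idx;                     // e.g. (423000, 157) -> 500157
}
#endif // DEBUG_IDX_DEMO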


// #ifdef ASSERT ...

#if OPTO_DU_ITERATOR_ASSERT
void DUIterator_Common::sample(const Node* node) {
  _vdui     = VerifyDUIterators;
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
  _last     = NULL;
}

void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node     == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}

void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note:  It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge.  On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that)  return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note:  It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y.  Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node     = node;
  _outcnt   = node->_outcnt;
  _del_tick = node->_del_tick;
}

void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}

void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx      <  node->_outcnt + (uint)at_end_ok, "idx in range");
}

void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt)  _idx = _outcnt;
  }
  verify(_node, true);
}

void DUIterator::verify_resync() {
  // Note:  We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}

void DUIterator::reset(const DUIterator& that) {
  if (this == &that)  return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx          == 0, "assign only the result of Node::outs()");
  assert(_idx               == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;                  // Clear the "was refreshed" flag.
    }
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}

void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}

void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0)  _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}


void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out    = node->_out;
  uint   cnt    = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}

void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}

void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions.  (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}

void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}

void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp              == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}

void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}

void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}

void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}

#endif //OPTO_DU_ITERATOR_ASSERT


#endif //ASSERT


// This constant used to initialize _out may be any non-null value.
// The value NULL is reserved for the top node only.
#define NO_OUT_ARRAY ((Node**)-1)

// This funny expression handshakes with Node::operator new
// to pull Compile::current out of the new node's _out field,
// and then calls a subroutine which manages most field
// initializations.  The only one which is tricky is the
// _idx field, which is const, and so must be initialized
// by a return value, not an assignment.
//
// (Aren't you thankful that Java finals don't require so many tricks?)
#define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
#ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
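
// Illustrative sketch (not part of HotSpot): the IDX_INIT handshake exists
// because a const member such as _idx can only be set from the constructor's
// initializer list, so all the shared setup is funneled through a function
// whose return value becomes the const member.  ConstIdxDemo and its members
// are hypothetical.
#ifdef IDX_INIT_DEMO
class ConstIdxDemo {
  static int _next_unique;     // stands in for Compile::next_unique()
  int init_fields() {          // shared initialization work goes here...
    return _next_unique++;     // ...and the return value becomes _idx
  }
public:
  const int _idx;              // const: only assignable via the initializer
  ConstIdxDemo() : _idx(init_fields()) {}
};
int ConstIdxDemo::_next_unique = 0;
#endif // IDX_INIT_DEMO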

// Out-of-line code from node constructors.
// Executed only when extra debug info is being passed around.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}

// Shared initialization code.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL)  init_node_notes(C, idx, nn);

  // Note:  At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}

//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
                     Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}

//------------------------------Node-------------------------------------------
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
                     Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
#ifdef ASSERT
  , _parse_idx(_idx)
#endif
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif


//------------------------------clone------------------------------------------
// Clone a Node.
Node *Node::clone() const {
  Compile* C = Compile::current();
  uint s = size_of();           // Size of inherited Node
  Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  if (is_macro())
    C->add_macro_node(n);
  if (is_expensive())
    C->add_expensive_node(n);
  // If the cloned node is a range check dependent CastII, add it to the list.
  CastIINode* cast = n->isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    C->add_range_check_cast(cast);
  }

  n->set_idx(C->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  C->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(C);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    n->as_Call()->clone_jvms(C);
  }
  if (n->is_SafePoint()) {
    n->as_SafePoint()->clone_replaced_nodes();
  }
  return n;                     // Return the clone
}
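
// Illustrative sketch (not part of HotSpot): the MachNode branch above fixes
// up _opnds after the raw copy by reusing the byte offset the pointer had
// within the original object, since a bitwise clone leaves interior pointers
// aimed at the original.  The guarded demo below shows the same
// rebase-by-offset idea on a hypothetical struct.
#ifdef CLONE_OFFSET_DEMO
#include <cstring>
#include <cstddef>
#include <cassert>
struct OffsetCloneDemo {
  int  payload[4];
  int* cursor;                 // interior pointer into payload
};
static void clone_offset_demo() {
  OffsetCloneDemo orig;
  orig.cursor = &orig.payload[2];          // points into 'orig'
  OffsetCloneDemo copy;
  std::memcpy(&copy, &orig, sizeof(orig)); // raw clone: cursor still -> orig
  // Rebase the pointer by the offset it had inside the original object:
  std::size_t off = (char*)orig.cursor - (char*)&orig;
  copy.cursor = (int*)((char*)&copy + off);
  assert(copy.cursor == &copy.payload[2]); // now points into 'copy'
}
#endif // CLONE_OFFSET_DEMO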

//---------------------------setup_is_top--------------------------------------
// Call this when changing the top node, to reassert the invariants
// required by Node::is_top.  See Compile::set_cached_top_node.
void Node::setup_is_top() {
  if (this == (Node*)Compile::current()->top()) {
    // This node has just become top.  Kill its out array.
    _outcnt = _outmax = 0;
    _out = NULL;                           // marker value for top
    assert(is_top(), "must be top");
  } else {
    if (_out == NULL)  _out = NO_OUT_ARRAY;
    assert(!is_top(), "must not be top");
  }
}


//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL)  nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in  += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
  CastIINode* cast = isa_CastII();
  if (cast != NULL && cast->has_range_check()) {
    compile->remove_range_check_cast(cast);
  }

  if (is_SafePoint()) {
    as_SafePoint()->delete_replaced_nodes();
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}

//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}

//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}

#ifdef ASSERT
//------------------------------is_dead----------------------------------------
bool Node::is_dead() const {
  // Mach and pinch point nodes may look dead even though they are not.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();
  return true;
}
#endif


//------------------------------is_unreachable---------------------------------
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
  assert(!is_Mach(), "doesn't work with MachNodes");
  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
}

//------------------------------add_req----------------------------------------
// Add a new required input at the end
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )       // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    _in[i] = in(_cnt);          // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;            // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}

//---------------------------add_req_batch-------------------------------------
// Add 'm' copies of a new required input at the end
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0)  add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {     // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )      // Find the NULL at end of prec edge list
        break;                  // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}

//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt); // Compact the array
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
}

//------------------------------del_req_ordered--------------------------------
// Delete the required edge and compact the edge array with preserved order
void Node::del_req_ordered( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  if (idx < --_cnt) {    // Not last edge ?
    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
  }
  // Avoid spec violation: Gap in prec edges.
  close_prec_gap_at(_cnt);
}

//------------------------------ins_req----------------------------------------
// Insert a new required input at position 'idx', sliding the rest over
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}

//-----------------------------find_edge---------------------------------------
int Node::find_edge(Node* n) {
  for (uint i = 0; i < len(); i++) {
    if (_in[i] == n)  return i;
  }
  return -1;
}

//----------------------------replace_edge-------------------------------------
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req()) {
        set_req(i, neww);
      } else {
        assert(find_prec_edge(neww) == -1, err_msg("spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx));
        set_prec(i, neww);
      }
      nrep++;
    }
  }
  return nrep;
}

/**
 * Replace input edges in the range pointing to 'old' node.
 */
int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
  if (old == neww)  return 0;  // nothing to do
  uint nrep = 0;
  for (int i = start; i < end; i++) {
    if (in(i) == old) {
      set_req(i, neww);
      nrep++;
    }
  }
  return nrep;
}

//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();)   // no reuse benefit expected
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}

//-----------------------------uncast---------------------------------------
// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
// Strip away casting.  (It is depth-limited.)
Node* Node::uncast() const {
  // Should be inline:
  //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
  if (is_ConstraintCast() || is_CheckCastPP())
    return uncast_helper(this);
  else
    return (Node*) this;
}

// Find a use (out-edge) of the current node with the given opcode.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

//---------------------------uncast_helper-------------------------------------
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);
    } else {
      break;
    }
  }
  return (Node*) p;
}

//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
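// Layout sketch of the _in edge array (exposition only):
//   _in: [ in(0) ... in(_cnt-1) | prec ... prec | NULL ... NULL ]
// Required inputs occupy slots [0.._cnt); precedence edges follow in
// [_cnt.._max), with any NULL slots packed at the tail.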
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move
  uint i = _cnt;
  while( in(i) != NULL ) {
    if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
    i++;
  }
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge

#ifdef ASSERT
  while ((++i)<_max) { assert(_in[i] == NULL, err_msg("spec violation: Gap in prec edges (node %d)", _idx)); }
#endif
}

//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {
  assert(j < _max, err_msg("oob: j=%d, _max=%d", j, _max));
  assert(j >= _cnt, "not a precedence edge");
  if (_in[j] == NULL) return;   // Avoid spec violation: Gap in prec edges.
  _in[j]->del_out((Node *)this);
  close_prec_gap_at(j);
}

//------------------------------size_of----------------------------------------
uint Node::size_of() const { return sizeof(*this); }

//------------------------------ideal_reg--------------------------------------
uint Node::ideal_reg() const { return 0; }

//------------------------------jvms-------------------------------------------
JVMState* Node::jvms() const { return NULL; }

#ifdef ASSERT
//------------------------------jvms-------------------------------------------
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms)  return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif

//------------------------------format-----------------------------------------
// Print as assembly
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }

//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
const Node *Node::is_block_proj() const { return 0; }

// Minimum guaranteed type
const Type *Node::bottom_type() const { return Type::BOTTOM; }


//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}

//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}

//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}

//------------------------------Ideal------------------------------------------
//
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold.  Running with '+VerifyIterativeGVN' checks
// these invariants, although it's too slow to have on by default.  If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
//
// The Ideal call may almost arbitrarily reshape the graph rooted at the 'this'
// pointer.  If ANY change is made, it must return the root of the reshaped
// graph - even if the root is the same Node.  Example: swapping the inputs
// to an AddINode gives the same answer and same root, but you still have to
// return the 'this' pointer instead of NULL.
//
// You cannot return an OLD Node, except for the 'this' pointer.  Use the
// Identity call to return an old Node; basically, if Identity can find
// another Node, have the Ideal call make no change and return NULL.
// Example: AddINode::Ideal must check for add of zero; in this case it
// returns NULL instead of doing any graph reshaping.
//
// You cannot modify any old Nodes except for the 'this' pointer.  Due to
// sharing there may be other users of the old Nodes relying on their current
// semantics.  Modifying them will break the other users.
// Example: when reshaping "(X+3)+4" into "X+7", you must leave the Node for
// "X+3" unchanged in case it is shared.
//
// If you modify the 'this' pointer's inputs, you should use
// 'set_req'.  If you are making a new Node (either as the new root or
// some new internal piece) you may use 'init_req' to set the initial
// value.  You can make a new Node with either 'new' or 'clone'.  In
// either case, def-use info is correctly maintained.
//
// Example: reshape "(X+3)+4" into "X+7":
//    set_req(1, in(1)->in(1));
//    set_req(2, phase->intcon(7));
//    return this;
// Example: reshape "X*4" into "X<<2"
//    return new (C) LShiftINode(in(1), phase->intcon(2));
//
// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node.  Example: reshape "X*31" into "(X<<5)-X".
//    Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5)));
//    return new (C) AddINode(shift, in(1));
//
// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
// These forms are faster than 'phase->transform(new (C) ConNode())' and Do
// The Right Thing with def-use info.
//
// You cannot bury the 'this' Node inside of a graph reshape.  If the reshaped
// graph uses the 'this' Node it must be the root.  If you want a Node with
// the same Opcode as the 'this' pointer use 'clone'.
//
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}
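
// Illustrative sketch (not HotSpot source): how a subclass might follow the
// contract above to reshape "(X+3)+4" into "X+7".  'MyAddINode' is a
// hypothetical class used purely for exposition; the real AddINode::Ideal
// is more general.
//
//   Node *MyAddINode::Ideal(PhaseGVN *phase, bool can_reshape) {
//     const TypeInt* t2 = phase->type(in(2))->isa_int();
//     if (t2 != NULL && t2->is_con() && in(1)->Opcode() == Op_AddI) {
//       const TypeInt* t12 = phase->type(in(1)->in(2))->isa_int();
//       if (t12 != NULL && t12->is_con()) {
//         // (X+con1)+con2 ==> X+(con1+con2).  Leave the old "X+con1" Node
//         // untouched (it may be shared); only 'this' is modified.
//         set_req(1, in(1)->in(1));
//         set_req(2, phase->intcon(t12->get_con() + t2->get_con()));
//         return this;           // any change must return the reshaped root
//       }
//     }
//     return NULL;               // no change
//   }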

// Some nodes have specific Ideal subgraph transformations only if they are
// unique users of specific nodes. Such nodes should be put on the IGVN worklist
// for the transformations to happen.
bool Node::has_special_unique_user() const {
  assert(outcnt() == 1, "match only for unique out");
  Node* n = unique_out();
  int op  = Opcode();
  if( this->is_Store() ) {
    // Condition for back-to-back stores folding.
    return n->Opcode() == op && n->in(MemNode::Memory) == this;
  } else if (this->is_Load() || this->is_DecodeN()) {
    // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
    return n->Opcode() == Op_MemBarAcquire;
  } else if( op == Op_AddL ) {
    // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
    return n->Opcode() == Op_ConvL2I && n->in(1) == this;
  } else if( op == Op_SubI || op == Op_SubL ) {
    // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
    return n->Opcode() == op && n->in(2) == this;
  }
  return false;
}

//--------------------------find_exact_control---------------------------------
// Skip chains of Proj and CatchProj nodes. Check for NULL and Top.
Node* Node::find_exact_control(Node* ctrl) {
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}

//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or is equal to the 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
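// The 'nlist' worklist below tags each visited Region node by its low
// pointer bit to record whether both of its input paths have been tried
// (Node pointers are word-aligned, so the bit is otherwise unused).
// Sketch of the idiom:
//   intptr_t tagged = (intptr_t)region + 1;    // second path now taken
//   Node*    node   = (Node*)(tagged & ~1);    // recover the pointer
//   bool     twice  = ((tagged & 1) != 0);     // already visited twice?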
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom      = this;
  bool  met_dom  = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs.  If we reach the
  // same region again, go through a different input.  Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top())  break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes (other than loop heads) were visited before, and
        // for loops the EntryControl path was taken: the walk did not go
        // around a cycle.
        return true;
      } else if (met_dom) {
        break;          // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop.  Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // This may give a conservative 'false' answer when there is no
      // information about which of the region's inputs is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool  visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body.  Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip;               // skip this nontrivial input
        }
      }

      // Set the low bit to record that both paths have now been taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break;    // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break;    // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break;    // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}

//------------------------------kill_dead_code---------------------------------
// This control node is dead.  Follow the subgraph below it making everything
// using it dead as well.  This will happen normally via the usual IterGVN
// worklist but this call is more efficient.  Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List  nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);
  bool has_irreducible_loop = igvn->C->has_irreducible_loop();

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(igvn);
    }
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead...    ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use);       // Yank from hash table prior to mod
        if (use->in(0) == dead) {     // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top);       // Cut dead edge to prevent processing
          nstack.push(use);           // the dead node again.
        } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
                   use->is_Loop() && !use->is_Root() &&       // Don't kill Root (RootNode extends LoopNode)
                   use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
          use->set_req(LoopNode::EntryControl, top);          // Cut dead edge to prevent processing
          use->set_req(0, top);       // Cut self edge
          nstack.push(use);
        } else {                      // Else found a not-dead user
          // Dead if all inputs are top or null
          bool dead_use = !use->is_Root(); // Keep empty graph alive
          for (uint j = 1; j < use->req(); j++) {
            Node* in = use->in(j);
            if (in == dead) {         // Turn all dead inputs into TOP
              use->set_req(j, top);
            } else if (in != NULL && !in->is_top()) {
              dead_use = false;
            }
          }
          if (dead_use) {
            if (use->is_Region()) {
              use->set_req(0, top);   // Cut self edge
            }
            nstack.push(use);
          } else {
            igvn->_worklist.push(use);
          }
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        igvn->C->remove_range_check_cast(cast);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i);      // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top);    // Smash input away
          if (n->outcnt() == 0) {   // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n);       // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          }
        }
      }
    } // (dead->outcnt() == 0)
  }   // while (nstack.size() > 0) for outputs
  return;
}

//------------------------------remove_dead_region-----------------------------
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy?  I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  if( n->is_Region() && n->as_Region()->is_copy() ) {
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}

//------------------------------Ideal_DU_postCCP-------------------------------
// Idealize graph, using DU info.  Must clone result into new-space
Node *Node::Ideal_DU_postCCP( PhaseCCP * ) {
  return NULL;                 // Default to no change
}

//------------------------------hash-------------------------------------------
// Hash function over Nodes.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ )  // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i);        // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}
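
// Worked sketch for a node with _cnt == 2 and input addresses a and b:
//   sum  = (0<<1)   - a         // after in(0):  -a        (mod 2^32)
//   sum  = (sum<<1) - b         // after in(1):  -(2a + b) (mod 2^32)
//   hash = (sum>>2) + 2 + Opcode()
// A NULL input contributes only the shift ('- 0'), hence the "ignore
// embedded NULLs" note above.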

//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes
uint Node::cmp( const Node &n ) const {
  return 1;                     // No special parts, so equal by default
}

//------------------------------rematerialize-----------------------------------
// Should we clone rather than spill this instruction?
bool Node::rematerialize() const {
  if ( is_Mach() )
    return this->as_Mach()->rematerialize();
  else
    return (_flags & Flag_rematerialize) != 0;
}

//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
bool Node::needs_anti_dependence_check() const {
  if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
    return false;
  else
    return in(1)->bottom_type()->has_memory();
}


// Get an integer constant from a ConNode (or CastIINode).
// Return NULL if there is no apparent constant here.
const TypeInt* Node::find_int_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_int();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_int();
  }
  return NULL;
}

// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}

// Get a narrow oop constant from a ConNNode.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}

// Get a long constant from a ConNode.
// Return NULL if there is no apparent constant here.
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}


/**
 * Return a ptr type for nodes which should have it.
 */
const TypePtr* Node::get_ptr_type() const {
  const TypePtr* tp = this->bottom_type()->make_ptr();
#ifdef ASSERT
  if (tp == NULL) {
    this->dump(1);
    assert((tp != NULL), "unexpected node type");
  }
#endif
  return tp;
}

// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}

// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}

#ifndef PRODUCT

//----------------------------NotANode----------------------------------------
// Used in debugging code to avoid walking across dead or uninitialized edges.
static inline bool NotANode(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // killed by Node::destruct
  return false;
}


//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
static void find_recur(Compile* C,  Node* &result, Node *n, int idx, bool only_ctrl,
                        VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n))  return;  // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space?   Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig))  break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}

// call this from debugger:
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}

//------------------------------find-------------------------------------------
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}

//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
#endif



#ifndef PRODUCT

// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];
const char *Node::Name() const { return NodeClassNames[Opcode()]; }

static bool is_disconnected(const Node* n) {
  for (uint i = 0; i < n->req(); i++) {
    if (n->in(i) != NULL)  return false;
  }
  return true;
}

#ifdef ASSERT
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig);  // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        st->print("...");
        break;
      }
    }
  }
}

void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0)  return;
  if (NotANode(orig))  orig = NULL;
  int trip = 10;
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig))  orig = NULL;
    if (trip-- <= 0)  break;
  }
}
#endif //ASSERT

//------------------------------dump------------------------------------------
// Dump a Node
void Node::dump(const char* suffix, outputStream *st) const {
  Compile* C = Compile::current();
  bool is_new = C->node_arena()->contains(this);
  C->_in_dump_cnt++;
  st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());

  // Dump the required and precedence inputs
  dump_req(st);
  dump_prec(st);
  // Dump the outputs
  dump_out(st);

  if (is_disconnected(this)) {
#ifdef ASSERT
    st->print("  [%d]",debug_idx());
    dump_orig(debug_orig(), st);
#endif
    st->cr();
    C->_in_dump_cnt--;
    return;                     // don't process dead nodes
  }

  // Dump node-specific info
  dump_spec(st);
#ifdef ASSERT
  // Dump the non-reset _debug_idx
  if (Verbose && WizardMode) {
    st->print("  [%d]",debug_idx());
  }
#endif

  const Type *t = bottom_type();

  if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
    const TypeInstPtr  *toop = t->isa_instptr();
    const TypeKlassPtr *tkls = t->isa_klassptr();
    ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
    if (klass && klass->is_loaded() && klass->is_interface()) {
      st->print("  Interface:");
    } else if (toop) {
      st->print("  Oop:");
    } else if (tkls) {
      st->print("  Klass:");
    }
    t->dump_on(st);
  } else if (t == Type::MEMORY) {
    st->print("  Memory:");
    MemNode::dump_adr_type(this, adr_type(), st);
  } else if (Verbose || WizardMode) {
    st->print("  Type:");
    if (t) {
      t->dump_on(st);
    } else {
      st->print("no type");
    }
  } else if (t->isa_vect() && this->is_MachSpillCopy()) {
    // Dump MachSpillcopy vector type.
    t->dump_on(st);
  }
  if (is_new) {
    debug_only(dump_orig(debug_orig(), st));
    Node_Notes* nn = C->node_notes_at(_idx);
    if (nn != NULL && !nn->is_clear()) {
      if (nn->jvms() != NULL) {
        st->print(" !jvms:");
        nn->jvms()->dump_spec(st);
      }
    }
  }
  if (suffix) st->print("%s", suffix);
  C->_in_dump_cnt--;
}

//------------------------------dump_req--------------------------------------
void Node::dump_req(outputStream *st) const {
  // Dump the required input edges
  for (uint i = 0; i < req(); i++) {    // For all required inputs
    Node* d = in(i);
    if (d == NULL) {
      st->print("_ ");
    } else if (NotANode(d)) {
      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
    }
  }
}


//------------------------------dump_prec-------------------------------------
void Node::dump_prec(outputStream *st) const {
  // Dump the precedence edges
  int any_prec = 0;
  for (uint i = req(); i < len(); i++) {       // For all precedence inputs
    Node* p = in(i);
    if (p != NULL) {
      if (!any_prec++) st->print(" |");
      if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    }
  }
}

//------------------------------dump_out--------------------------------------
void Node::dump_out(outputStream *st) const {
  // Delimit the output edges
  st->print(" [[");
  // Dump the output edges
  for (uint i = 0; i < _outcnt; i++) {    // For all outputs
    Node* u = _out[i];
    if (u == NULL) {
      st->print("_ ");
    } else if (NotANode(u)) {
      st->print("NotANode ");
    } else {
      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
    }
  }
  st->print("]] ");
}

//------------------------------dump_nodes-------------------------------------
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  GrowableArray <Node *> nstack(C->live_nodes());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp  = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n))  continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top())  continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  if (direction > 0) {
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}

//------------------------------dump-------------------------------------------
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}

//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}

// VERIFICATION CODE
// For each input edge to a node (ie - for each Use-Def edge), verify that
// there is a corresponding Def-Use edge.
//------------------------------verify_edges-----------------------------------
void Node::verify_edges(Unique_Node_List &visited) {
  uint i, j, idx;
  int  cnt;
  Node *n;

  // Recursive termination test
  if (visited.member(this))  return;
  visited.push(this);

  // Walk over all input edges, checking for correspondence
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if (n != NULL && !n->is_top()) {
      // Count instances of (Node *)this
      cnt = 0;
      for (idx = 0; idx < n->_outcnt; idx++ ) {
        if (n->_out[idx] == (Node *)this)  cnt++;
      }
      assert( cnt > 0,"Failed to find Def-Use edge." );
      // Check for duplicate edges
      // walk the input array downcounting the input edges to n
      for( j = 0; j < len(); j++ ) {
        if( in(j) == n ) cnt--;
      }
      assert( cnt == 0,"Mismatched edge count.");
    } else if (n == NULL) {
      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
    } else {
      assert(n->is_top(), "sanity");
      // Nothing to check.
    }
  }
  // Recursive walk over all input edges
  for( i = 0; i < len(); i++ ) {
    n = in(i);
    if( n != NULL )
      in(i)->verify_edges(visited);
  }
}

//------------------------------verify_recur-----------------------------------
static const Node *unique_top = NULL;

void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 )  return;
  if (verify_depth > 0)  --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space.  Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}

//------------------------------verify-----------------------------------------
// Check Def-Use info for my subgraph
void Node::verify() const {
  Compile* C = Compile::current();
  Node* old_top = C->cached_top_node();
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  verify_recur(this, -1, old_space, new_space);
  C->set_cached_top_node(old_top);
}
#endif


//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}

void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
  if( visited.test_set(_idx) ) return;
  pre(*this,env);               // Call the pre-order walk function
  for( uint i=0; i<_max; i++ )
    if( in(i) )                 // Input exists and is not walked?
      in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
  post(*this,env);              // Call the post-order walk function
}

void Node::nop(Node &, void*) {}
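
// Usage sketch (hypothetical visitor, not HotSpot source): count the nodes
// reachable through input edges, using Node::nop as the post-order function.
//   static void count_pre(Node &n, void *env) { (*(uint*)env)++; }
//
//   uint cnt = 0;
//   root->walk(count_pre, Node::nop, &cnt);   // 'root' is any Node*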

//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not?  Generally false for Control
// and true for everything else.  Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx;                   // True for other than index 0 (control)
}

static RegMask _not_used_at_all;
// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

//=============================================================================
//-----------------------------------------------------------------------------
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max   = 0;
  _nodes = NULL;
  _a     = new_arena;
}

//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}

//-----------------------------------------------------------------------------
void Node_Array::grow( uint i ) {
  if( !_max ) {
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1;        // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}
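
// Example: with _max == 4, a call to grow(9) doubles _max twice (4 -> 8 ->
// 16) and zeroes the newly added slots [4..15].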

//-----------------------------------------------------------------------------
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max);      // Get more space if full
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}

//-----------------------------------------------------------------------------
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}

//-----------------------------------------------------------------------------
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}

//-----------------------------------------------------------------------------
void Node_Array::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _max; i++ ) {
    Node *nn = _nodes[i];
    if( nn != NULL ) {
      tty->print("%5d--> ",i); nn->dump();
    }
  }
#endif
}

//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable).
// It is possible for this operation to return false for a loop-varying
// value, if it appears (by local graph inspection) to be computed by a simple conditional.
bool Node::is_iteratively_computed() {
  if (ideal_reg()) { // does operation have a result register?
    for (uint i = 1; i < req(); i++) {
      Node* n = in(i);
      if (n != NULL && n->is_Phi()) {
        for (uint j = 1; j < n->req(); j++) {
          if (n->in(j) == this) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; otherwise return NULL.
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    Node* def = in(1);
    if (def && def->outcnt() >= 2) {
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          uint j;
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {
            return use;
          }
        }
      }
    }
  }
  return NULL;
}


//--------------------------unique_ctrl_out------------------------------
// Return the unique control output if there is exactly one; NULL if none or more than one.
Node* Node::unique_ctrl_out() {
  Node* found = NULL;
  for (uint i = 0; i < outcnt(); i++) {
    Node* use = raw_out(i);
    if (use->is_CFG() && use != this) {
      if (found != NULL) return NULL;
      found = use;
    }
  }
  return found;
}

//=============================================================================
//------------------------------yank-------------------------------------------
// Find and remove
void Node_List::yank( Node *n ) {
  uint i;
  for( i = 0; i < _cnt; i++ )
    if( _nodes[i] == n )
      break;

  if( i < _cnt )
    _nodes[i] = _nodes[--_cnt];
}

//------------------------------dump-------------------------------------------
void Node_List::dump() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print("%5d--> ",i);
      _nodes[i]->dump();
    }
#endif
}

void Node_List::dump_simple() const {
#ifndef PRODUCT
  for( uint i = 0; i < _cnt; i++ )
    if( _nodes[i] ) {
      tty->print(" %d", _nodes[i]->_idx);
    } else {
      tty->print(" NULL");
    }
#endif
}

//=============================================================================
//------------------------------remove-----------------------------------------
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());
        _in_worklist >>= n->_idx;
        return;
      }
    ShouldNotReachHere();
  }
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from worklist
void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {

  for( uint i = 0; i < size(); ++i ) {
    Node *n = at(i);
    assert( n != NULL, "Did not expect null entries in worklist");
    if( ! useful.test(n->_idx) ) {
      _in_worklist >>= n->_idx;
      map(i,Node_List::pop());
      // Node *replacement = Node_List::pop();
      // if( i != size() ) { // Check if removing last entry
      //   _nodes[i] = replacement;
      // }
      --i;  // Visit popped node
      // If it was last entry, loop terminates since size() was also reduced
    }
  }
}

//=============================================================================
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;             // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top;        // restore _top
}

// Node_Stack can also be used as a map from a node's _idx to the Node*.
Node* Node_Stack::find(uint idx) const {
  uint sz = size();
  for (uint i=0; i < sz; i++) {
    if (idx == index_at(i) )
      return node_at(i);
  }
  return NULL;
}
