

  if (depth() == 1) {
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  // Now add the locals which are initially bound to arguments:
  uint arg_size = tf()->domain()->cnt();
  ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
  for (i = TypeFunc::Parms; i < arg_size; i++) {
    map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
  }

  // Clear out the rest of the map (locals and stack)
  for (i = arg_size; i < len; i++) {
    map()->init_req(i, top());
  }

  SafePointNode* entry_map = stop();
  return entry_map;
}

//-----------------------------do_method_entry--------------------------------
// Emit any code needed in the pseudo-block before BCI zero.
// The main thing to do is lock the receiver of a synchronized method.
void Parse::do_method_entry() {
  set_parse_bci(InvocationEntryBci); // Pseudo-BCP
  set_sp(0);                      // Java Stack Pointer

  NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )

  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_entry(method());
  }

  // If the method is synchronized, we need to construct a lock node, attach
  // it to the Start node, and pin it there.
  if (method()->is_synchronized()) {
    // Insert a FastLockNode right after the Start which takes as arguments
    // the current thread pointer, the "this" pointer & the address of the
    // stack slot pair used for the lock.  The "this" pointer is a projection
    // off the start node, but the locking spot has to be constructed by
    // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
    // becomes the second argument to the FastLockNode call.  The
    // FastLockNode becomes the new control parent to pin it to the start.
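    //
    // Roughly, the shape produced by shared_lock() below is:
    //
    //   box   = BoxLock(<stack slot for this monitor>)
    //   flock = FastLock(ctrl, lock_obj, box)
    //
    // (informal sketch; shared_lock() also creates the LockNode and records
    // the monitor in the map's JVM state)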

    // Setup Object Pointer
    Node *lock_obj = NULL;
    if(method()->is_static()) {
      ciInstance* mirror = _method->holder()->java_mirror();
      const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
      lock_obj = makecon(t_lock);
    } else {                  // Else pass the "this" pointer,
      lock_obj = local(0);    // which is Parm0 from StartNode
    }
    // Clear out dead values from the debug info.
    kill_dead_locals();
    // Build the FastLockNode
    _synch_lock = shared_lock(lock_obj);
  }

  // Feed profiling data for parameters to the type system so it can
  // propagate it as speculative types
  record_profiled_parameters_for_speculation();

  if (depth() == 1) {
    increment_and_test_invocation_counter(Tier2CompileThreshold);
  }
}

//------------------------------init_blocks------------------------------------
// Create the parser's basic blocks and collect their predecessor/successor info.
void Parse::init_blocks() {
  // Create the blocks.
  _block_count = flow()->block_count();
  _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
  Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);

  int rpo;

  // Initialize the structs.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_node(this, rpo);
  }

  // Collect predecessor and successor information.
  for (rpo = 0; rpo < block_count(); rpo++) {
    Block* block = rpo_at(rpo);
    block->init_graph(this);
  }
}

//-------------------------------init_node-------------------------------------
void Parse::Block::init_node(Parse* outer, int rpo) {
  _flow = outer->flow()->rpo_at(rpo);
  _pred_count = 0;
  _preds_parsed = 0;
  _count = 0;
  assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
  assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
  assert(_live_locals.size() == 0, "sanity");

  // entry point has additional predecessor
  if (flow()->is_start())  _pred_count++;
  assert(flow()->is_start() == (this == outer->start_block()), "");
}

//-------------------------------init_graph------------------------------------
void Parse::Block::init_graph(Parse* outer) {
  // Create the successor list for this parser block.
  GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
  GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
  int ns = tfs->length();
  int ne = tfe->length();
  _num_successors = ns;
  _all_successors = ns+ne;
  _successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne);
  int p = 0;
  for (int i = 0; i < ns+ne; i++) {
    ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
    Block* block2 = outer->rpo_at(tf2->rpo());
    _successors[i] = block2;

    // Accumulate pred info for the other block, too.
    if (i < ns) {
      block2->_pred_count++;
    } else {
      block2->_is_handler = true;
    }

    #ifdef ASSERT
    // A block's successors must be distinguishable by BCI.
    // That is, no bytecode is allowed to branch to two different
    // clones of the same code location.
    for (int j = 0; j < i; j++) {
      Block* block1 = _successors[j];
      if (block1 == block2)  continue;  // duplicates are OK
      assert(block1->start() != block2->start(), "successors have unique bcis");
    }
    #endif
  }

  // Note: We never call next_path_num along exception paths, so they
  // never get processed as "ready".  Also, the input phis of exception
  // handlers get specially processed.
}

//---------------------------successor_for_bci---------------------------------
Parse::Block* Parse::Block::successor_for_bci(int bci) {
  for (int i = 0; i < all_successors(); i++) {
    Block* block2 = successor_at(i);
    if (block2->start() == bci)  return block2;
  }
  // We can actually reach here if ciTypeFlow traps out a block
  // due to an unloaded class, and concurrently with compilation the
  // class is then loaded, so that a later phase of the parser is
  // able to see more of the bytecode CFG.  Or, the flow pass and
  // the parser can have a minor difference of opinion about executability
  // of bytecodes.  For example, "obj.field = null" is executable even
  // if the field's type is an unloaded class; the flow pass used to
  // make a trap for such code.
  return NULL;
}


//-----------------------------stack_type_at-----------------------------------
const Type* Parse::Block::stack_type_at(int i) const {
  return get_type(flow()->stack_type_at(i));
}


//-----------------------------local_type_at-----------------------------------
const Type* Parse::Block::local_type_at(int i) const {
  // Make dead locals fall to bottom.
  if (_live_locals.size() == 0) {
    MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
    // This bitmap can be zero length if we saw a breakpoint.
    // In such cases, pretend they are all live.
    ((Block*)this)->_live_locals = live_locals;
  }
  if (_live_locals.size() > 0 && !_live_locals.at(i))
    return Type::BOTTOM;

  return get_type(flow()->local_type_at(i));
}


#ifndef PRODUCT

//----------------------------name_for_bc--------------------------------------
// helper method for BytecodeParseHistogram
static const char* name_for_bc(int i) {
  return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
}

//----------------------------BytecodeParseHistogram------------------------------------
Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
  _parser   = p;
  _compiler = c;
  if( ! _initialized ) { _initialized = true; reset(); }
}

//----------------------------current_count------------------------------------
int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
  switch( bph_type ) {
  case BPH_transforms: { return _parser->gvn().made_progress(); }
  case BPH_values:     { return _parser->gvn().made_new_values(); }
  default: { ShouldNotReachHere(); return 0; }
  }
}

//----------------------------initialized--------------------------------------
bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }

//----------------------------reset--------------------------------------------
void Parse::BytecodeParseHistogram::reset() {
  int i = Bytecodes::number_of_codes;
  while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
}

//----------------------------set_initial_state--------------------------------
// Record info when starting to parse one bytecode
void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    _initial_bytecode    = bc;
    _initial_node_count  = _compiler->unique();
    _initial_transforms  = current_count(BPH_transforms);
    _initial_values      = current_count(BPH_values);
  }
}

//----------------------------record_change--------------------------------
// Record results of parsing one bytecode
void Parse::BytecodeParseHistogram::record_change() {
  if( PrintParseStatistics && !_parser->is_osr_parse() ) {
    ++_bytecodes_parsed[_initial_bytecode];
    _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
    _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
    _new_values        [_initial_bytecode] += (current_count(BPH_values)     - _initial_values);
  }
}


//----------------------------print--------------------------------------------
void Parse::BytecodeParseHistogram::print(float cutoff) {
  ResourceMark rm;
  // print profile
  int total  = 0;
  int i      = 0;
  for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
  int abs_sum = 0;
  tty->cr();   //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr("Histogram of %d parsed bytecodes:", total);
  if( total == 0 ) { return; }
  tty->cr();
  tty->print_cr("absolute:  count of compiled bytecodes of this type");
  tty->print_cr("relative:  percentage contribution to compiled nodes");
  tty->print_cr("nodes   :  Average number of nodes constructed per bytecode");
  tty->print_cr("rnodes  :  Significance towards total nodes constructed, (nodes*relative)");
  tty->print_cr("transforms: Average amount of tranform progress per bytecode compiled");
  tty->print_cr("values  :  Average number of node values improved per bytecode");
  tty->print_cr("name    :  Bytecode name");
  tty->cr();
  tty->print_cr("  absolute  relative   nodes  rnodes  transforms  values   name");
  tty->print_cr("----------------------------------------------------------------------");
  while (--i > 0) {
    int       abs = _bytecodes_parsed[i];
    float     rel = abs * 100.0F / total;
    float   nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
    float  rnodes = _bytecodes_parsed[i] == 0 ? 0 :  rel * nodes;
    float  xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
    float  values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values       [i])/_bytecodes_parsed[i];
    if (cutoff <= rel) {
      tty->print_cr("%10d  %7.2f%%  %6.1f  %6.2f   %6.1f   %6.1f     %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
      abs_sum += abs;
    }
  }
  tty->print_cr("----------------------------------------------------------------------");
  float rel_sum = abs_sum * 100.0F / total;
  tty->print_cr("%10d  %7.2f%%    (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
  tty->print_cr("----------------------------------------------------------------------");
  tty->cr();
}
#endif

//----------------------------load_state_from----------------------------------
// Load block/map/sp.  But do not touch iter/bci.
void Parse::load_state_from(Block* block) {
  set_block(block);
  // load the block's JVM state:
  set_map(block->start_map());
  set_sp( block->start_sp());
}


//-----------------------------record_state------------------------------------
void Parse::Block::record_state(Parse* p) {
  assert(!is_merged(), "can only record state once, on 1st inflow");
  assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
  set_start_map(p->stop());
}


//------------------------------do_one_block-----------------------------------
void Parse::do_one_block() {
  if (TraceOptoParse) {
    Block *b = block();
    int ns = b->num_successors();
    int nt = b->all_successors();

    tty->print("Parsing block #%d at bci [%d,%d), successors: ",
                  block()->rpo(), block()->start(), block()->limit());
    for (int i = 0; i < nt; i++) {
      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
    }
    if (b->is_loop_head()) tty->print("  lphd");
    tty->cr();
  }

  assert(block()->is_merged(), "must be merged before being parsed");
  block()->mark_parsed();
  ++_blocks_parsed;

  // Set iterator to start of block.
  iter().reset_to_bci(block()->start());

  CompileLog* log = C->log();

  // Parse bytecodes
  while (!stopped() && !failing()) {
    iter().next();

    // Learn the current bci from the iterator:
    set_parse_bci(iter().cur_bci());

    if (bci() == block()->limit()) {
      // Do not walk into the next block until directed by do_all_blocks.
      merge(bci());
      break;
    }
    assert(bci() < block()->limit(), "bci still in block");

    if (log != NULL) {
      // Output an optional context marker, to help place actions
      // that occur during parsing of this BC.  If there is no log
      // output until the next context string, this context string
      // will be silently ignored.
      log->set_context("bc code='%d' bci='%d'", (int)bc(), bci());
    }

    if (block()->has_trap_at(bci())) {
      // We must respect the flow pass's traps, because it will refuse
      // to produce successors for trapping blocks.
      int trap_index = block()->flow()->trap_index();
      assert(trap_index != 0, "trap index must be valid");
      uncommon_trap(trap_index);
      break;
    }

    NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );

#ifdef ASSERT
    int pre_bc_sp = sp();
    int inputs, depth;
    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
    assert(!have_se || pre_bc_sp >= inputs, err_msg_res("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs));
#endif //ASSERT

    do_one_bytecode();

    assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
           err_msg_res("incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth));

    do_exceptions();

    NOT_PRODUCT( parse_histogram()->record_change(); );

    if (log != NULL)
      log->clear_context();  // skip marker if nothing was printed

    // Fall into next bytecode.  Each bytecode normally has 1 sequential
    // successor which is typically made ready by visiting this bytecode.
    // If the successor has several predecessors, then it is a merge
    // point, starts a new basic block, and is handled like other basic blocks.
  }
}


//------------------------------set_parse_bci----------------------------------
void Parse::set_parse_bci(int bci) {
  set_bci(bci);
  Node_Notes* nn = C->default_node_notes();
  if (nn == NULL)  return;

  // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
  if (!DebugInlinedCalls && depth() > 1) {
    return;
  }

  // Update the JVMS annotation, if present.
  JVMState* jvms = nn->jvms();
  if (jvms != NULL && jvms->bci() != bci) {
    // Update the JVMS.
    jvms = jvms->clone_shallow(C);
    jvms->set_bci(bci);
    nn->set_jvms(jvms);
  }
}

//------------------------------merge------------------------------------------
// Merge the current mapping into the basic block starting at bci
void Parse::merge(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "our arrival must be expected");
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

//-------------------------merge_new_path--------------------------------------
// Merge the current mapping into the basic block, using a new path
void Parse::merge_new_path(int target_bci) {
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(!target->is_ready(), "new path into frozen graph");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//-------------------------merge_exception-------------------------------------
// Merge the current mapping into the basic block starting at bci
// The ex_oop must be pushed on the stack, unlike throw_to_exit.
void Parse::merge_exception(int target_bci) {
  assert(sp() == 1, "must have only the throw exception on the stack");
  Block* target = successor_for_bci(target_bci);
  if (target == NULL) { handle_missing_successor(target_bci); return; }
  assert(target->is_handler(), "exceptions are handled by special blocks");
  int pnum = target->add_new_path();
  merge_common(target, pnum);
}

//--------------------handle_missing_successor---------------------------------
void Parse::handle_missing_successor(int target_bci) {
#ifndef PRODUCT
  Block* b = block();
  int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
#endif
  ShouldNotReachHere();
}

//--------------------------merge_common---------------------------------------
void Parse::merge_common(Parse::Block* target, int pnum) {
  if (TraceOptoParse) {
    tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
  }

  // Zap extra stack slots to top
  assert(sp() == target->start_sp(), "");
  clean_stack(sp());

  if (!target->is_merged()) {   // No prior mapping at this bci
    if (TraceOptoParse) { tty->print(" with empty state");  }

    // If this path is dead, do not bother capturing it as a merge.
    // It is "as if" we had 1 fewer predecessors from the beginning.
    if (stopped()) {
      if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
      return;
    }

    // Record that a new block has been merged.
    ++_blocks_merged;

    // Make a region if we know there are multiple or unpredictable inputs.
    // (Also, if this is a plain fall-through, we might see another region,
    // which must not be allowed into this block's map.)
    if (pnum > PhiNode::Input         // Known multiple inputs.
        || target->is_handler()       // These have unpredictable inputs.
        || target->is_loop_head()     // Known multiple inputs
        || control()->is_Region()) {  // We must hide this guy.

      int current_bci = bci();
      set_parse_bci(target->start()); // Set target bci
      if (target->is_SEL_head()) {
        DEBUG_ONLY( target->mark_merged_backedge(block()); )
        if (target->start() == 0) {
          // Add loop predicate for the special case when
          // there are backbranches to the method entry.
          add_predicate();
        }
      }
      // Add a Region to start the new basic block.  Phis will be added
      // later lazily.
      int edges = target->pred_count();
      if (edges < pnum)  edges = pnum;  // might be a new path!
      RegionNode *r = new (C) RegionNode(edges+1);
      gvn().set_type(r, Type::CONTROL);
      record_for_igvn(r);
      // zap all inputs to NULL for debugging (done in Node(uint) constructor)
      // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
      r->init_req(pnum, control());
      set_control(r);
      set_parse_bci(current_bci); // Restore bci
    }

    // Convert the existing Parser mapping into a mapping at this bci.
    store_state_to(target);
    assert(target->is_merged(), "do not come here twice");

  } else {                      // Prior mapping at this bci
    if (TraceOptoParse) {  tty->print(" with previous state"); }
#ifdef ASSERT
    if (target->is_SEL_head()) {
      target->mark_merged_backedge(block());
    }
#endif
    // We must not manufacture more phis if the target is already parsed.
    bool nophi = target->is_parsed();

    SafePointNode* newin = map();// Hang on to incoming mapping
    Block* save_block = block(); // Hang on to incoming block;
    load_state_from(target);    // Get prior mapping

    assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
    assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
    assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
    assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");

    // Iterate over my current mapping and the old mapping.
    // Where different, insert Phi functions.
    // Use any existing Phi functions.
    assert(control()->is_Region(), "must be merging to a region");
    RegionNode* r = control()->as_Region();

    // Compute where to merge into
    // Merge incoming control path
    r->init_req(pnum, newin->control());

    if (pnum == 1) {            // Last merge for this Region?
      if (!block()->flow()->is_irreducible_entry()) {
        Node* result = _gvn.transform_no_reclaim(r);
        if (r != result && TraceOptoParse) {
          tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
        }
      }
      record_for_igvn(r);
    }

    // Update all the non-control inputs to map:
    assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
    bool check_elide_phi = target->is_SEL_backedge(save_block);
    for (uint j = 1; j < newin->req(); j++) {
      Node* m = map()->in(j);   // Current state of target.
      Node* n = newin->in(j);   // Incoming change to target state.
      PhiNode* phi;
      if (m->is_Phi() && m->as_Phi()->region() == r)
        phi = m->as_Phi();
      else
        phi = NULL;
      if (m != n) {             // Different; must merge
        switch (j) {
        // Frame pointer and Return Address never changes
        case TypeFunc::FramePtr:// Drop m, use the original value
        case TypeFunc::ReturnAdr:
          break;
        case TypeFunc::Memory:  // Merge inputs to the MergeMem node
          assert(phi == NULL, "the merge contains phis, not vice versa");
          merge_memory_edges(n->as_MergeMem(), pnum, nophi);
          continue;
        default:                // All normal stuff
          if (phi == NULL) {
            const JVMState* jvms = map()->jvms();
            if (EliminateNestedLocks &&
                jvms->is_mon(j) && jvms->is_monitor_box(j)) {
              // BoxLock nodes are not commoned.
              // Use old BoxLock node as merged box.
              assert(newin->jvms()->is_monitor_box(j), "sanity");
              // This assert also tests that nodes are BoxLock.
              assert(BoxLockNode::same_slot(n, m), "sanity");
              C->gvn_replace_by(n, m);
            } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
              phi = ensure_phi(j, nophi);
            }
          }
          break;
        }
      }
      // At this point, n might be top if:
      //  - there is no phi (because TypeFlow detected a conflict), or
      //  - the corresponding control edge is top (a dead incoming path)
      // It is a bug if we create a phi which sees a garbage value on a live path.

      if (phi != NULL) {
        assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
        assert(phi->region() == r, "");
        phi->set_req(pnum, n);  // Then add 'n' to the merge
        if (pnum == PhiNode::Input) {
          // Last merge for this Phi.
          // So far, Phis have had a reasonable type from ciTypeFlow.
          // Now _gvn will join that with the meet of current inputs.
          // BOTTOM is never permissible here, because pessimistically
          // Phis of pointers cannot lose the basic pointer type.
          debug_only(const Type* bt1 = phi->bottom_type());
          assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
          map()->set_req(j, _gvn.transform_no_reclaim(phi));
          debug_only(const Type* bt2 = phi->bottom_type());
          assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
          record_for_igvn(phi);
        }
      }
    } // End of for all values to be merged

    if (pnum == PhiNode::Input &&
        !r->in(0)) {         // The occasional useless Region
      assert(control() == r, "");
      set_control(r->nonnull_req());
    }

    map()->merge_replaced_nodes_with(newin);

    // newin has been subsumed into the lazy merge, and is now dead.
    set_block(save_block);

    stop();                     // done with this guy, for now
  }

  if (TraceOptoParse) {
    tty->print_cr(" on path %d", pnum);
  }

  // Done with this parser state.
  assert(stopped(), "");
}


//--------------------------merge_memory_edges---------------------------------
void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
  // (nophi means we must not create phis, because we already parsed here)
  assert(n != NULL, "");
  // Merge the inputs to the MergeMems
  MergeMemNode* m = merged_memory();

  assert(control()->is_Region(), "must be merging to a region");
  RegionNode* r = control()->as_Region();

  PhiNode* base = NULL;
  MergeMemNode* remerge = NULL;
  for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
    Node *p = mms.force_memory();
    Node *q = mms.memory2();
    if (mms.is_empty() && nophi) {
      // Trouble:  No new splits allowed after a loop body is parsed.
      // Instead, wire the new split into a MergeMem on the backedge.
      // The optimizer will sort it out, slicing the phi.
      if (remerge == NULL) {
        assert(base != NULL, "");
        assert(base->in(0) != NULL, "should not be xformed away");
        remerge = MergeMemNode::make(C, base->in(pnum));
        gvn().set_type(remerge, Type::MEMORY);
        base->set_req(pnum, remerge);
      }
      remerge->set_memory_at(mms.alias_idx(), q);
      continue;
    }
    assert(!q->is_MergeMem(), "");
    PhiNode* phi;
    if (p != q) {
      phi = ensure_memory_phi(mms.alias_idx(), nophi);
    } else {
      if (p->is_Phi() && p->as_Phi()->region() == r)
        phi = p->as_Phi();
      else
        phi = NULL;
    }
    // Insert q into local phi
    if (phi != NULL) {
      assert(phi->region() == r, "");
      p = phi;
      phi->set_req(pnum, q);
      if (mms.at_base_memory()) {
        base = phi;  // delay transforming it
      } else if (pnum == 1) {
        record_for_igvn(phi);
        p = _gvn.transform_no_reclaim(phi);
      }
      mms.set_memory(p);// store back through the iterator
    }
  }
  // Transform base last, in case we must fiddle with remerging.
  if (base != NULL && pnum == 1) {
    record_for_igvn(base);
    m->set_base_memory( _gvn.transform_no_reclaim(base) );
  }
}


//------------------------ensure_phis_everywhere-------------------------------
void Parse::ensure_phis_everywhere() {
  ensure_phi(TypeFunc::I_O);

  // Ensure a phi on all currently known memories.
  for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
    ensure_memory_phi(mms.alias_idx());
    debug_only(mms.set_memory());  // keep the iterator happy
  }

  // Note:  This is our only chance to create phis for memory slices.
  // If we miss a slice that crops up later, it will have to be
  // merged into the base-memory phi that we are building here.
  // Later, the optimizer will comb out the knot, and build separate
  // phi-loops for each memory slice that matters.
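  //
  // For example (informal; the field name is made up): if only the base
  // memory has a phi here and a later bytecode first touches a new alias
  // category, say a field Foo._f, that slice's stores initially flow
  // through the base-memory phi; the optimizer later splits out a separate
  // phi-loop for that slice.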

  // Monitors must nest nicely and not get confused amongst themselves.
  // Phi-ify everything up to the monitors, though.
  uint monoff = map()->jvms()->monoff();
  uint nof_monitors = map()->jvms()->nof_monitors();

  assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
  bool check_elide_phi = block()->is_SEL_head();
  for (uint i = TypeFunc::Parms; i < monoff; i++) {
    if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
      ensure_phi(i);
    }
  }

  // Even monitors need Phis, though they are well-structured.
  // This is true for OSR methods, and also for the rare cases where
  // a monitor object is the subject of a replace_in_map operation.
  // See bugs 4426707 and 5043395.
  for (uint m = 0; m < nof_monitors; m++) {
    ensure_phi(map()->jvms()->monitor_obj_offset(m));
  }
}


//-----------------------------add_new_path------------------------------------
// Add a previously unaccounted predecessor to this block.
int Parse::Block::add_new_path() {
  // If there is no map, return the lowest unused path number.
  if (!is_merged())  return pred_count()+1;  // there will be a map shortly

  SafePointNode* map = start_map();
  if (!map->control()->is_Region())
    return pred_count()+1;  // there may be a region some day
  RegionNode* r = map->control()->as_Region();

  // Add new path to the region.
  uint pnum = r->req();
  r->add_req(NULL);

  for (uint i = 1; i < map->req(); i++) {
    Node* n = map->in(i);
    if (i == TypeFunc::Memory) {
      // Ensure a phi on all currently known memories.
      for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
        Node* phi = mms.memory();
        if (phi->is_Phi() && phi->as_Phi()->region() == r) {
          assert(phi->req() == pnum, "must be same size as region");
          phi->add_req(NULL);
        }
      }
    } else {
      if (n->is_Phi() && n->as_Phi()->region() == r) {
        assert(n->req() == pnum, "must be same size as region");
        n->add_req(NULL);
      }
    }
  }

  return pnum;
}

//------------------------------ensure_phi-------------------------------------
// Turn the idx'th entry of the current map into a Phi
PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
  SafePointNode* map = this->map();
  Node* region = map->control();
  assert(region->is_Region(), "");

  Node* o = map->in(idx);
  assert(o != NULL, "");

  if (o == top())  return NULL; // TOP always merges into TOP

  if (o->is_Phi() && o->as_Phi()->region() == region) {
    return o->as_Phi();
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const JVMState* jvms = map->jvms();
  const Type* t = NULL;
  if (jvms->is_loc(idx)) {
    t = block()->local_type_at(idx - jvms->locoff());
  } else if (jvms->is_stk(idx)) {
    t = block()->stack_type_at(idx - jvms->stkoff());
  } else if (jvms->is_mon(idx)) {
    assert(!jvms->is_monitor_box(idx), "no phis for boxes");
    t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
  } else if ((uint)idx < TypeFunc::Parms) {
    t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
  } else {
    assert(false, "no type information for this phi");
  }

  // If the type falls to bottom, then this must be a local that
  // is mixing ints and oops or some such.  Forcing it to top
  // makes it go dead.
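  // For example (informal): a local slot holding an int on one incoming
  // path and an oop on another meets to BOTTOM; verified bytecode cannot
  // read such a slot after the merge, so topping it out just kills a dead
  // value.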
  if (t == Type::BOTTOM) {
    map->set_req(idx, top());
    return NULL;
  }

  // Do not create phis for top either.
  // A top on a non-null control flow must be an unused value, even after the phi.
  if (t == Type::TOP || t == Type::HALF) {
    map->set_req(idx, top());
    return NULL;
  }

  PhiNode* phi = PhiNode::make(region, o, t);
  gvn().set_type(phi, t);
  if (C->do_escape_analysis()) record_for_igvn(phi);
  map->set_req(idx, phi);
  return phi;
}

//--------------------------ensure_memory_phi----------------------------------
// Turn the idx'th slice of the current memory into a Phi
PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
  MergeMemNode* mem = merged_memory();
  Node* region = control();
  assert(region->is_Region(), "");

  Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
  assert(o != NULL && o != top(), "");

  PhiNode* phi;
  if (o->is_Phi() && o->as_Phi()->region() == region) {
    phi = o->as_Phi();
    if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
      // clone the shared base memory phi to make a new memory split
      assert(!nocreate, "Cannot build a phi for a block already parsed.");
      const Type* t = phi->bottom_type();
      const TypePtr* adr_type = C->get_adr_type(idx);
      phi = phi->slice_memory(adr_type);
      gvn().set_type(phi, t);
    }
    return phi;
  }

  // Now use a Phi here for merging
  assert(!nocreate, "Cannot build a phi for a block already parsed.");
  const Type* t = o->bottom_type();
  const TypePtr* adr_type = C->get_adr_type(idx);
  phi = PhiNode::make(region, o, t, adr_type);
  gvn().set_type(phi, t);
  if (idx == Compile::AliasIdxBot)
    mem->set_base_memory(phi);
  else
    mem->set_memory_at(idx, phi);
  return phi;
}

//------------------------------call_register_finalizer-----------------------
// Check the klass of the receiver and call register_finalizer if the
// class needs finalization.
void Parse::call_register_finalizer() {
  Node* receiver = local(0);
  assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL,
         "must have non-null instance type");

  const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
  if (tinst != NULL && tinst->klass()->is_loaded() && !tinst->klass_is_exact()) {
    // The type isn't known exactly so see if CHA tells us anything.
    ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
    if (!Dependencies::has_finalizable_subclass(ik)) {
      // No finalizable subclasses so skip the dynamic check.
      C->dependencies()->assert_has_no_finalizable_subclasses(ik);
      return;
    }
  }

  // Insert a dynamic test for whether the instance needs
  // finalization.  In general this will fold up since the concrete
  // class is often visible so the access flags are constant.
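  // For example (informal): if the receiver's klass is known exactly, the
  // klass and access_flags loads below become constants, the
  // JVM_ACC_HAS_FINALIZER test folds, and one arm of the If goes away.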
  Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
  Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));

  Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);

  Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
  Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
  Node* test  = _gvn.transform(new (C) BoolNode(check, BoolTest::ne));

  IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);

  RegionNode* result_rgn = new (C) RegionNode(3);
  record_for_igvn(result_rgn);

  Node *skip_register = _gvn.transform(new (C) IfFalseNode(iff));
  result_rgn->init_req(1, skip_register);

  Node *needs_register = _gvn.transform(new (C) IfTrueNode(iff));
  set_control(needs_register);
  if (stopped()) {
    // There is no slow path.
    result_rgn->init_req(2, top());
  } else {
    Node *call = make_runtime_call(RC_NO_LEAF,
                                   OptoRuntime::register_finalizer_Type(),
                                   OptoRuntime::register_finalizer_Java(),
                                   NULL, TypePtr::BOTTOM,
                                   receiver);
    make_slow_call_ex(call, env()->Throwable_klass(), true);

    Node* fast_io  = call->in(TypeFunc::I_O);
    Node* fast_mem = call->in(TypeFunc::Memory);
    // These two phis are pre-filled with copies of the fast IO and Memory
    Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
    Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);

    result_rgn->init_req(2, control());
    io_phi    ->init_req(2, i_o());
    mem_phi   ->init_req(2, reset_memory());

    set_all_memory( _gvn.transform(mem_phi) );
    set_i_o(        _gvn.transform(io_phi) );
  }

  set_control( _gvn.transform(result_rgn) );
}

// Add check to deoptimize if RTM state is not ProfileRTM
void Parse::rtm_deopt() {
#if INCLUDE_RTM_OPT
  if (C->profile_rtm()) {
    assert(C->method() != NULL, "only for normal compilations");
    assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
    assert(depth() == 1, "generate check only for main compiled method");

    // Set starting bci for uncommon trap.
    set_parse_bci(is_osr_parse() ? osr_bci() : 0);

    // Load the rtm_state from the MethodData.
    const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
    Node* mdo = makecon(adr_type);
    int offset = MethodData::rtm_state_offset_in_bytes();
    Node* adr_node = basic_plus_adr(mdo, mdo, offset);
    Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

    // Separate Load from Cmp by Opaque.
    // In expand_macro_nodes() it will be replaced either
    // with this load when there are locks in the code
    // or with ProfileRTM (cmp->in(2)) otherwise so that
    // the check will fold.
    Node* profile_state = makecon(TypeInt::make(ProfileRTM));
    Node* opq   = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
    Node* chk   = _gvn.transform( new (C) CmpINode(opq, profile_state) );
    Node* tst   = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
    // Branch to failure if state was changed
    { BuildCutout unless(this, tst, PROB_ALWAYS);
      uncommon_trap(Deoptimization::Reason_rtm_state_change,
                    Deoptimization::Action_make_not_entrant);
    }
  }
#endif
}

//------------------------------return_current---------------------------------
// Append current _map to _exit_return
void Parse::return_current(Node* value) {
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }

  // Do not set_parse_bci, so that return goo is credited to the return insn.
  set_bci(InvocationEntryBci);
  if (method()->is_synchronized() && GenerateSynchronizationCode) {
    shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
  }
  if (C->env()->dtrace_method_probes()) {
    make_dtrace_method_exit(method());
  }
  SafePointNode* exit_return = _exits.map();
  exit_return->in( TypeFunc::Control  )->add_req( control() );
  exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
  Node *mem = exit_return->in( TypeFunc::Memory   );
  for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch just this one input
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      gvn().set_type_bottom(phi);
      phi->del_req(phi->req()-1);  // prepare to re-patch
      mms.set_memory(phi);
    }
    mms.memory()->add_req(mms.memory2());
  }

  // frame pointer is always same, already captured
  if (value != NULL) {
    // If returning oops to an interface-return, there is a silent free
    // cast from oop to interface allowed by the Verifier.  Make it explicit
    // here.
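    // For example (informal): a method declared to return an interface such
    // as Runnable may actually return a concrete Thread; the verifier allows
    // that without a checkcast, so a CheckCastPP to the declared interface
    // type is added here to keep the exit phi's type consistent.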
    Node* phi = _exits.argument(0);
    const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
    if (tr && tr->klass()->is_loaded() &&
        tr->klass()->is_interface()) {
      const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
      if (tp && tp->klass()->is_loaded() &&
          !tp->klass()->is_interface()) {
        // sharpen the type eagerly; this eases certain assert checking
        if (tp->higher_equal(TypeInstPtr::NOTNULL))
          tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
        value = _gvn.transform(new (C) CheckCastPPNode(0, value, tr));
      }
    } else {
      // Also handle returns of oop-arrays to an arrays-of-interface return
      const TypeInstPtr* phi_tip;
      const TypeInstPtr* val_tip;
      Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
      if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
          val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
         value = _gvn.transform(new (C) CheckCastPPNode(0, value, phi->bottom_type()));
      }
    }
    phi->add_req(value);
  }

  if (_first_return) {
    _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
    _first_return = false;
  } else {
    _exits.map()->merge_replaced_nodes_with(map());
  }

  stop_and_kill_map();          // This CFG path dies here
}


//------------------------------add_safepoint----------------------------------
void Parse::add_safepoint() {
  // See if we can avoid this safepoint.  No need for a SafePoint immediately
  // after a Call (except Leaf Call) or another SafePoint.
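  // For example (informal): a backward branch taken right after a non-leaf
  // Java call needs no extra SafePoint, because the call itself already
  // guarantees one (see guaranteed_safepoint() below).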
  Node *proj = control();
  bool add_poll_param = SafePointNode::needs_polling_address_input();
  uint parms = add_poll_param ? TypeFunc::Parms+1 : TypeFunc::Parms;
  if( proj->is_Proj() ) {
    Node *n0 = proj->in(0);
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() ) {
      if( n0->as_Call()->guaranteed_safepoint() )
        return;
    } else if( n0->is_SafePoint() && n0->req() >= parms ) {
      return;
    }
  }

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Clone the JVM State
  SafePointNode *sfpnt = new (C) SafePointNode(parms, NULL);

  // Capture memory state BEFORE a SafePoint.  Since we can block at a
  // SafePoint we need our GC state to be safe; i.e. we need all our current
  // write barriers (card marks) to not float down after the SafePoint so we
  // must read raw memory.  Likewise we need all oop stores to match the card
  // marks.  If deopt can happen, we need ALL stores (we need the correct JVM
  // state on a deopt).

  // We do not need to WRITE the memory state after a SafePoint.  The control
  // edge will keep card-marks and oop-stores from floating up from below a
  // SafePoint and our true dependency added here will keep them from floating
  // down below a SafePoint.
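  //
  // For example (informal): if the card mark for a preceding oop store could
  // float below the SafePoint, a GC happening at the SafePoint might scan a
  // stale card table and miss that store; capturing memory here and pinning
  // it via the control edge prevents such reordering.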

  // Clone the current memory state
  Node* mem = MergeMemNode::make(C, map()->memory());

  mem = _gvn.transform(mem);

  // Pass control through the safepoint
  sfpnt->init_req(TypeFunc::Control  , control());
  // Fix edges normally used by a call
  sfpnt->init_req(TypeFunc::I_O      , top() );
  sfpnt->init_req(TypeFunc::Memory   , mem   );
  sfpnt->init_req(TypeFunc::ReturnAdr, top() );
  sfpnt->init_req(TypeFunc::FramePtr , top() );

  // Create a node for the polling address
  if( add_poll_param ) {
    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
    sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
  }

  // Fix up the JVM State edges
  add_safepoint_edges(sfpnt);
  Node *transformed_sfpnt = _gvn.transform(sfpnt);
  set_control(transformed_sfpnt);

  // Provide an edge from root to safepoint.  This makes the safepoint
  // appear useful until the parse has completed.
  if( OptoRemoveUseless && transformed_sfpnt->is_SafePoint() ) {
    assert(C->root() != NULL, "Expect parse is still valid");
    C->root()->add_prec(transformed_sfpnt);
  }
}

#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
  InlineTree* ilt = NULL;
  if (C->ilt() != NULL) {
    JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
    ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
  }
  if (PrintCompilation && Verbose) {
    if (depth() == 1) {
      if( ilt->count_inlines() ) {
        tty->print("    __inlined %d (%d bytes)", ilt->count_inlines(),
                     ilt->count_inline_bcs());
        tty->cr();
      }
    } else {
      if (method()->is_synchronized())         tty->print("s");
      if (method()->has_exception_handlers())  tty->print("!");
      // Check this is not the final compiled version
      if (C->trap_can_recompile()) {
        tty->print("-");
      } else {
        tty->print(" ");
      }
      method()->print_short_name();
      if (is_osr_parse()) {
        tty->print(" @ %d", osr_bci());
      }
      tty->print(" (%d bytes)",method()->code_size());
      if (ilt->count_inlines()) {
        tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                   ilt->count_inline_bcs());
      }
      tty->cr();
    }
  }
  if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
    // Print that we succeeded; suppress this message on the first osr parse.

    if (method()->is_synchronized())         tty->print("s");
    if (method()->has_exception_handlers())  tty->print("!");
    // Check this is not the final compiled version
    if (C->trap_can_recompile() && depth() == 1) {
      tty->print("-");
    } else {
      tty->print(" ");
    }
    if( depth() != 1 ) { tty->print("   "); }  // missing compile count
    for (int i = 1; i < depth(); ++i) { tty->print("  "); }
    method()->print_short_name();
    if (is_osr_parse()) {
      tty->print(" @ %d", osr_bci());
    }
    if (ilt->caller_bci() != -1) {
      tty->print(" @ %d", ilt->caller_bci());
    }
    tty->print(" (%d bytes)",method()->code_size());
    if (ilt->count_inlines()) {
      tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
                 ilt->count_inline_bcs());
    }
    tty->cr();
  }
}


//------------------------------dump-------------------------------------------
// Dump information associated with the bytecodes of current _method
void Parse::dump() {
  if( method() != NULL ) {
    // Iterate over bytecodes
    ciBytecodeStream iter(method());
    for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {
      dump_bci( iter.cur_bci() );
      tty->cr();
    }
  }
}

// Dump information associated with a byte code index, 'bci'
void Parse::dump_bci(int bci) {
  // Output info on merge-points, cloning, and within _jsr..._ret
  // NYI
  tty->print(" bci:%d", bci);
}

#endif
C:\hotspot-69087d08d473\src\share\vm/opto/parse2.cpp
/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 0, &elem);
  if (stopped())  return;     // guaranteed null or range check
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
  push(ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 1, &elem);
  if (stopped())  return;     // guaranteed null or range check
  Node* val = pop();
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  if (elem == TypeInt::BOOL) {
    elem_type = T_BOOLEAN;
  }
  store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
  }
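  // For example (informal): for an array known to be of length 10 the size
  // type is the constant range [10,10], so a constant index 3 has idxtype
  // [3,3]; since 3 < 10 and 3 >= 0, the range check can be dropped.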

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
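      // (Informally: (unsigned)idx < (unsigned)len is false both when
      // idx >= len and when idx < 0, because a negative idx reinterprets
      // as a value larger than any legal array length, so one unsigned
      // compare covers both bounds.)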
      Node* chk = _gvn.transform( new (C) CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new (C) BoolNode(chk, btest) );
    }
    // Branch to failure if out of bounds
    { BuildCutout unless(this, tst, PROB_MAX);
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new (C) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new (C) BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region  = new (C) RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new (C) IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse  = _gvn.transform( new (C) IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iftrue = _gvn.transform( new (C) IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // False branch, use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int _dest;
  int _table_index;             // index into method data table

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }

  void setRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
      _hi = hi;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index) {
    setRange(value, value, dest, table_index);
  }
  bool adjoin(jint value, int dest, int table_index) {
    return adjoinRange(value, value, dest, table_index);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d", lo(), dest());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d", hi(), dest());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d", lo(), dest());
    else
      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
  }
};


//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();

  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < len+2, "not too many ranges");

  // Add a safepoint if a backward branch was observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}
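
// Illustrative example (explanatory comment only, values are made up):
// a tableswitch with lo_index == 0 and hi_index == 2, whose three cases
// all branch to bci 40 and whose default is bci 60, produces three
// SwitchRanges (assuming no profiling, so table_index == NullTableIndex
// and adjacent cases can merge via adjoin()):
//   {min_jint..-1} => 60      (leading default range)
//   {0..2}         => 40      (the three cases, merged)
//   {3..max_jint}  => 60      (trailing default range)
// The ranges jointly cover all of Type::INT, which is what
// jump_switch_ranges() expects of its top-level input.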


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
  {
    for( int j = 0; j < len; j++ ) {
      table[j+j+0] = iter().get_int_table(2+j+j);
      table[j+j+1] = iter().get_dest_table(2+j+j+1);
    }
    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for( int j = 0; j < len; j++ ) {
    jint match_int   = table[j+j+0];
    int  dest        = table[j+j+1];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    if( match_int != next_lo ) {
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
    }
    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = table[2*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if( highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < rnum, "not too many ranges");

  // Add a safepoint if a backward branch was observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}
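
// Illustrative example (explanatory comment only): a lookupswitch with keys
// {5, 9} branching to bcis 40 and 50 and a default at bci 60 yields, after
// sorting and gap filling (again assuming table_index == NullTableIndex):
//   {min_jint..4} => 60,  {5} => 40,  {6..8} => 60,  {9} => 50,
//   {10..max_jint} => 60
// Every hole between the sorted keys becomes an explicit default range, so
// the resulting ranges also exhaust Type::INT.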

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jump tables enabled?
  if (!UseJumpTables)  return false;

  // Are jump tables supported?
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64 total_outlier_size = 0;
  int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
  int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) lo++;
    if (default_dest == hi->dest()) hi--;
  }

  // Find the total number of cases and ranges
  int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
    return false;
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new (C) SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform( new (C) CmpUNode(key_val, size) );
    Node*   tst = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ge) );
    IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    jump_if_true_fork(iff, default_dest, NullTableIndex);
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.
#ifdef _LP64
  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases-1, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
  key_val = C->constrained_convI2L(&_gvn, key_val, ikeytype, control());
#endif

  // Scale the value by the word size so we have a byte offset into the table,
  // rather than a raw switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new (C) MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Node* jtn = _gvn.transform( new (C) JumpNode(control(), key_val, num_cases) );

  // These are the switch destinations hanging off the jumpnode
  int i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index());
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}
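
// Illustrative sizing example (explanatory comment only): if the surviving
// ranges cover keys 0..99 split across 20 SwitchRanges, then num_cases == 100
// and num_range == 20, so a table is emitted only if
//   MinJumpTableSize <= 100 <= MaxJumpTableSize   and
//   100 <= MaxJumpTableSparseness * 20
// i.e. on average at most MaxJumpTableSparseness table slots per range.
// When needs_guard is set, the unsigned (CmpU/ge) test above routes any key
// outside [lowval, lowval+num_cases) to default_dest before the JumpNode.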

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    // Ensure that the block's successors are a (duplicate-free) set.
    int successors_counted = 0;  // block occurrences in [hi..lo]
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);

      // Check that the set of successors is the same in both places.
      int successors_found = 0;
      for (SwitchRange* p = lo; p <= hi; p++) {
        if (p->dest() == target->start())  successors_found++;
      }
      assert(successors_found > 0, "successor must be known");
      successors_counted += successors_found;
    }
    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val)  lo++;
    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
    while (hi->lo() > max_val)  hi--;
    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index());
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

    int nr = hi - lo + 1;

    SwitchRange* mid = lo + nr/2;
    // if there is an easy choice, pivot at a singleton:
    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

    assert(lo < mid && mid <= hi, "good pivot choice");
    assert(nr != 2 || mid == hi,   "should pick higher of 2");
    assert(nr != 3 || mid == hi-1, "should pick middle of 3");

    Node *test_val = _gvn.intcon(mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
      if (eq_test_only) {
        assert(mid == hi-1, "");
      }

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
        Node   *iftrue  = _gvn.transform( new (C) IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new (C) IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
      } else {
        Node *iftrue  = _gvn.transform( new (C) IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}
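
// Illustrative decision-tree shape (explanatory comment only, values made
// up): with five ranges r0..r4, the pivot is mid == r2.  If r2 is a
// singleton {k}, the code above emits "key != k" (the false arm merges into
// r2's destination), then "key <= k" to split the rest into {r3, r4} and
// {r0, r1}, each handled by a recursive call one level deeper.  The
// estimated depth log2(5-1)+1 == 3 matches the _est_switch_depth
// bookkeeping above.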

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d   = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression stack during the zero check
  zero_check_int(peek());
  // Compile-time detection of a division by zero?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
        Node *iff = _gvn.transform( new (C) IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new (C) IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new (C) SubINode(zero, a) );
        Node *andn= _gvn.transform( new (C) AndINode(neg, mask) );
        Node *negn= _gvn.transform( new (C) SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new (C) AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new (C) ModINode(control(),a,b) ) );
}
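
// Worked example of the power-of-two fast path above (explanatory comment
// only): for a % 8 the mask is 7.  A non-negative dividend such as a == 13
// takes the fast arm and yields 13 & 7 == 5.  A negative dividend such as
// a == -13 goes through negate/and/negate: 0 - (-13) == 13, 13 & 7 == 5,
// and 0 - 5 == -5, which matches irem semantics (the result takes the sign
// of the dividend).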

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on the stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int  true_cnt = profile->true_count();

    // Which count maps to taken/not_taken depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken, and set the "cnt" field.  Returns -1.0
// (PROB_UNKNOWN) if we must fall back to static prediction.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt  = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data == NULL) {
      return PROB_UNKNOWN;
    }
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if there are too few counts to be meaningful, or too many, in which case the sum would overflow.
  // We also require each counter to be non-negative first, since an overflowed (negative) counter could still yield a positive-looking sum.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust if this block is a cloned private block but the
  // jump counts are shared.  Take the private count for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)   prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}
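
// Worked example (explanatory comment only, numbers made up): with
// taken == 90 and not_taken == 10 the sum (100) clears the 40-count
// threshold, so prob == 90/100 == 0.9 (then clamped into
// [PROB_MIN, PROB_MAX]) and cnt == 100 / FreqCountInvocations, unless
// block()->count() supplies a private count for a cloned block, in which
// case that count is used as the sum for cnt instead.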

//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci,
                               Node* test) {
  float prob = dynamic_branch_prediction(cnt, btest, test);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data == NULL ||
            (data->as_BranchData()->taken() +  data->as_BranchData()->not_taken() == 0)) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}
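
// Summary of the static fallback above (explanatory comment only): with no
// usable profile, ::eq defaults to PROB_STATIC_INFREQUENT, ::ne to
// PROB_STATIC_FREQUENT, everything else to PROB_FAIR, and any backward
// target (target_bci < bci()) is promoted to PROB_STATIC_FREQUENT.  The one
// exception is an OSR compilation whose method data exists but shows zero
// counts for the branch: that returns PROB_UNKNOWN so the caller can emit
// an uncommon trap and collect more counts.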

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) const {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
#ifndef PRODUCT
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
#endif
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible.  In the tiered system the interpreter doesn't
    // do profiling, and by the time we get to the lower tier from the interpreter
    // the path may be cold again.  Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  explicit_null_checks_inserted++;

  // Generate real control flow
  Node   *tst = _gvn.transform( new (C) BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue  = _gvn.transform( new (C) IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      explicit_null_checks_elided++;
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    explicit_null_checks_elided++;
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else  {                     // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible.  In the tiered system the interpreter doesn't
    // do profiling, and by the time we get to the lower tier from the interpreter
    // the path may be cold again.  Make sure it doesn't look untaken.
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest         = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new (C) BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new (C) IfTrueNode(iff);
  Node* untaken_branch = new (C) IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}
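
// Canonicalization example (explanatory comment only, assuming ::gt is one
// of the non-canonical masks): for an if_icmpgt, btest ::gt is negated to
// ::le and taken_if_true becomes false, so after the projections are swapped
// the bytecode's taken path hangs off the IfFalse projection, while the
// IfNode itself carries the probability of its IfTrue (::le) arm,
// true_prob == untaken_prob.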

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
       // Found:
       //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
       // or the narrowOop equivalent.
       const Type* obj_type = _gvn.type(obj);
       const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
       if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
           tboth->higher_equal(obj_type)) {
          // obj has to be of the exact type Foo if the CmpP succeeds.
          int obj_in_map = map()->find_edge(obj);
          JVMState* jvms = this->jvms();
          if (obj_in_map >= 0 &&
              (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
            TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
            const Type* tcc = ccast->as_Type()->type();
            assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
            // Delay transform() call to allow recovery of pre-cast value
            // at the control merge.
            _gvn.set_type_bottom(ccast);
            record_for_igvn(ccast);
            // Here's the payoff.
            replace_in_map(obj, ccast);
          }
       }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;

  switch (btest) {
  case BoolTest::eq:                    // Constant test?
    {
      const Type* tboth = tcon->join_speculative(tval);
      if (tboth == tval)  break;        // Nothing to gain.
      if (tcon->isa_int()) {
        ccast = new (C) CastIINode(val, tboth);
      } else if (tcon == TypePtr::NULL_PTR) {
        // Cast to null, but keep the pointer identity temporarily live.
        ccast = new (C) CastPPNode(val, tboth);
      } else {
        const TypeF* tf = tcon->isa_float_constant();
        const TypeD* td = tcon->isa_double_constant();
        // Exclude tests vs float/double 0 as these could be
        // either +0 or -0.  Just because you are equal to +0
        // doesn't mean you ARE +0!
        // Note, following code also replaces Long and Oop values.
        if ((!tf || tf->_f != 0.0) &&
            (!td || td->_d != 0.0))
          cast = con;                   // Replace non-constant val by con.
      }
    }
    break;

  case BoolTest::ne:
    if (tcon == TypePtr::NULL_PTR) {
      cast = cast_not_null(val, false);
    }
    break;

  default:
    // (At this point we could record int range types with CastII.)
    break;
  }

  if (ccast != NULL) {
    const Type* tcc = ccast->as_Type()->type();
    assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
    // Delay transform() call to allow recovery of pre-cast value
    // at the control merge.
    ccast->set_req(0, control());
    _gvn.set_type_bottom(ccast);
    record_for_igvn(ccast);
    cast = ccast;
  }

  if (cast != NULL) {                   // Here's the payoff.
    replace_in_map(val, cast);
  }
}
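
// Illustrative effect on the map (explanatory comment only): on the taken
// path of "if (x == null)" the code above replaces x with a CastPP to the
// null type, while "if (x != null)" replaces x with a not-null cast
// (cast_not_null).  Likewise, when a klass load from x is compared equal to
// a constant klass, the eq path installs a CheckCastPP that narrows x to the
// exact class, which later type-based optimizations can exploit.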

/**
 * Use speculative type to optimize CmpP node: if comparison is
 * against the low level class, cast the object to the speculative
 * type if any. CmpP should then go away.
 *
 * @param c  expected CmpP node
 * @return   result of CmpP on object cast to the speculative type
 *
 */
Node* Parse::optimize_cmp_with_klass(Node* c) {
  // If this is transformed by the _gvn to a comparison with the low
  // level klass then we may be able to use speculation
  if (c->Opcode() == Op_CmpP &&
      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
      c->in(2)->is_Con()) {
    Node* load_klass = NULL;
    Node* decode = NULL;
    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
      decode = c->in(1);
      load_klass = c->in(1)->in(1);
    } else {
      load_klass = c->in(1);
    }
    if (load_klass->in(2)->is_AddP()) {
      Node* addp = load_klass->in(2);
      Node* obj = addp->in(AddPNode::Address);
      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
      if (obj_type->speculative_type() != NULL) {
        ciKlass* k = obj_type->speculative_type();
        inc_sp(2);
        obj = maybe_cast_profiled_obj(obj, k);
        dec_sp(2);
        // Make the CmpP use the cast obj
        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
        load_klass = load_klass->clone();
        load_klass->set_req(2, addp);
        load_klass = _gvn.transform(load_klass);
        if (decode != NULL) {
          decode = decode->clone();
          decode->set_req(1, load_klass);
          load_klass = _gvn.transform(decode);
        }
        c = c->clone();
        c->set_req(1, load_klass);
        c = _gvn.transform(c);
      }
    }
  }
  return c;
}

//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void Parse::do_one_bytecode() {
  Node *a, *b, *c, *d;          // Handy temps
  BoolTest::mask btest;
  int i;

  assert(!has_exceptions(), "bytecode entry state must be clear of throws");

  if (C->check_node_count(NodeLimitFudgeFactor * 5,
                          "out of nodes parsing method")) {
    return;
  }

#ifdef ASSERT
  // for setting breakpoints
  if (TraceOptoParse) {
    tty->print(" @");
    dump_bci(bci());
    tty->cr();
  }
#endif

  switch (bc()) {
  case Bytecodes::_nop:
    // do nothing
    break;
  case Bytecodes::_lconst_0:
    push_pair(longcon(0));
    break;

  case Bytecodes::_lconst_1:
    push_pair(longcon(1));
    break;

  case Bytecodes::_fconst_0:
    push(zerocon(T_FLOAT));
    break;

  case Bytecodes::_fconst_1:
    push(makecon(TypeF::ONE));
    break;

  case Bytecodes::_fconst_2:
    push(makecon(TypeF::make(2.0f)));
    break;

  case Bytecodes::_dconst_0:
    push_pair(zerocon(T_DOUBLE));
    break;

  case Bytecodes::_dconst_1:
    push_pair(makecon(TypeD::ONE));
    break;

  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
  case Bytecodes::_iconst_0: push(intcon( 0)); break;
  case Bytecodes::_iconst_1: push(intcon( 1)); break;
  case Bytecodes::_iconst_2: push(intcon( 2)); break;
  case Bytecodes::_iconst_3: push(intcon( 3)); break;
  case Bytecodes::_iconst_4: push(intcon( 4)); break;
  case Bytecodes::_iconst_5: push(intcon( 5)); break;
  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
  case Bytecodes::_aconst_null: push(null());  break;
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    // If the constant is unresolved, run this BC once in the interpreter.
    {
      ciConstant constant = iter().get_constant();
      if (constant.basic_type() == T_OBJECT &&
          !constant.as_object()->is_loaded()) {
        int index = iter().get_constant_pool_index();
        constantTag tag = iter().get_constant_pool_tag(index);
        uncommon_trap(Deoptimization::make_trap_request
                      (Deoptimization::Reason_unloaded,
                       Deoptimization::Action_reinterpret,
                       index),
                      NULL, tag.internal_name());
        break;
      }
      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
             "must be java_mirror of klass");
      bool pushed = push_constant(constant, true);
      guarantee(pushed, "must be possible to push this constant");
    }

    break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);   break;
  case Bytecodes::_pop2: dec_sp(2);   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detection of a null exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload: array_load(T_BYTE);   break;
  case Bytecodes::_caload: array_load(T_CHAR);   break;
  case Bytecodes::_iaload: array_load(T_INT);    break;
  case Bytecodes::_saload: array_load(T_SHORT);  break;
  case Bytecodes::_faload: array_load(T_FLOAT);  break;
  case Bytecodes::_aaload: array_load(T_OBJECT); break;
  case Bytecodes::_laload: {
    a = array_addressing(T_LONG, 0);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
    break;
  }
  case Bytecodes::_daload: {
    a = array_addressing(T_DOUBLE, 0);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
    break;
  }
  case Bytecodes::_bastore: array_store(T_BYTE);  break;
  case Bytecodes::_castore: array_store(T_CHAR);  break;
  case Bytecodes::_iastore: array_store(T_INT);   break;
  case Bytecodes::_sastore: array_store(T_SHORT); break;
  case Bytecodes::_fastore: array_store(T_FLOAT); break;
  case Bytecodes::_aastore: {
    d = array_addressing(T_OBJECT, 1);
    if (stopped())  return;     // guaranteed null or range check
    array_store_check();
    c = pop();                  // Oop to store
    b = pop();                  // index (already used)
    a = pop();                  // the array itself
    const TypeOopPtr* elemtype  = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT, MemNode::release);
    break;
  }
  case Bytecodes::_lastore: {
    a = array_addressing(T_LONG, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
    break;
  }
  case Bytecodes::_dastore: {
    a = array_addressing(T_DOUBLE, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    c = dstore_rounding(c);
    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
    break;
  }
  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression stack during the zero check
    zero_check_int(peek());
    // Compile-time detection of a division by zero?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new (C) DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new (C) SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new (C) NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new (C) ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, negate the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
    c = _gvn.transform( new (C) CmpF3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new (C) ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new (C) ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new (C) ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new (C) RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new (C) ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2F is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new (C) ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new (C) NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new (C) ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
    c = _gvn.transform( new (C) CmpD3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


    // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during null-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of null-exception?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
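    // Typical shape: "lcmp; iflt <loop_head>" controlling a loop backedge.
    // The SafePoint added here (before the lcmp) dominates and kills the one
    // at the backwards branch, so the captured JVM state holds only the two
    // long operands rather than the materialized 3-way result.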
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new (C) SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new (C) ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new (C) ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
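    // e.g. 0x000000FF << 24 == 0xFF000000, then >> 24 (arithmetic) yields
    // 0xFFFFFFFF, i.e. (byte)0xFF == -1, as i2b requires.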
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
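    // Zero extend: keep only the low 16 bits, e.g. -1 (0xFFFFFFFF) becomes
    // the char value 0x0000FFFF (65535).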
    a = pop();
    push( _gvn.transform( new (C) AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new (C) ConvI2FNode(a) ) ;
    c = precision_rounding(b);
    push(c);                    // push the (possibly precision-rounded) result
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new (C) ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new (C) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop results in a NullPointerException being thrown
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new (C) CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;


  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = IdealGraphPrinter::printer();
  if(printer) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(C, buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}
C:\hotspot-69087d08d473\src\share\vm/opto/parse3.cpp
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert( field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
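  // Example: C.<clinit> may read and write C's own static fields while C is
  // still being initialized; the check below admits exactly the <clinit>/<init>
  // methods of the field holder or one of its subclasses.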
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    // final or stable field
    const Type* stable_type = NULL;
    if (FoldStableValues && field->is_stable()) {
      stable_type = Type::get_const_type(field->type());
      if (field->type()->is_array_klass()) {
        int stable_dimension = field->type()->as_array_klass()->dimension();
        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
      }
    }
    if (field->is_static()) {
      // final static field
      if (C->eliminate_boxing()) {
        // The pointers in the autobox arrays are always non-null.
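        // e.g. java.lang.Integer$IntegerCache.cache: folding this load to a
        // constant tagged as an autobox cache lets the boxing elimination pass
        // recognize Integer.valueOf() results that come from the cache.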
        ciSymbol* klass_name = field->holder()->name();
        if (field->name() == ciSymbol::cache_field_name() &&
            field->holder()->uses_default_loader() &&
            (klass_name == ciSymbol::java_lang_Character_CharacterCache() ||
             klass_name == ciSymbol::java_lang_Byte_ByteCache() ||
             klass_name == ciSymbol::java_lang_Short_ShortCache() ||
             klass_name == ciSymbol::java_lang_Integer_IntegerCache() ||
             klass_name == ciSymbol::java_lang_Long_LongCache())) {
          bool require_const = true;
          bool autobox_cache = true;
          if (push_constant(field->constant_value(), require_const, autobox_cache)) {
            return;
          }
        }
      }
      if (push_constant(field->constant_value(), false, false, stable_type))
        return;
    } else {
      // final or stable non-static field
      // Treat final non-static fields of trusted classes (classes in
      // java.lang.invoke and sun.invoke packages and subpackages) as
      // compile time constants.
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);
        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
          // fall through to field load; the field is not yet initialized
        } else {
          if (push_constant(constant, true, false, stable_type))
            return;
        }
      }
    }
  }

  Node* leading_membar = NULL;
  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
    Node* mb = insert_mem_bar(Op_MemBarAcquire, ld);
    mb->as_MemBar()->set_trailing_load();
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  Node* leading_membar = NULL;
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol) {
    leading_membar = insert_mem_bar(Op_MemBarRelease);
  }

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
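  // (The MemBarRelease emitted before the store and the MemBarVolatile emitted
  // below together give the StoreStore|LoadStore fence before and the StoreLoad
  // fence after the store that a volatile write requires under the JMM.)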
  if (is_vol) {
    // On CPUs that are not multiple-copy-atomic (support_IRIW_for_not_multiple_copy_atomic_cpu),
    // the MemBarVolatile is issued before each volatile load instead (see do_get_xxx),
    // so it is skipped here.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      Node* mb = insert_mem_bar(Op_MemBarVolatile, store); // Use fat membar
      MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
    }
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    set_wrote_final(true);
    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    if (C->eliminate_boxing() &&
        adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
        AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}



bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) {
  const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache);
  switch (constant.basic_type()) {
  case T_ARRAY:
  case T_OBJECT:
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
      con_type = con_type->join_speculative(stable_type);
    break;

  case T_ILLEGAL:
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }

  if (con_type == NULL)
    // we cannot inline the oop, but we can use it later to narrow a type
    return false;

  push_node(constant.basic_type(), makecon(con_type));
  return true;
}


//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the class that the array contains is not loaded:
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see the Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node*   count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node*   obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
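  // Example: new T[3][n] (ndimensions == 2) gives expand_count = 1 + 3 = 4
  // allocations (the outer array plus three level-1 sub-arrays), so it is
  // expanded inline whenever the limit below is at least 4.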
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can expand into a series of [a]newarray allocations instead of calling the
  // multianewarray runtime stub if there is only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}
C:\hotspot-69087d08d473\src\share\vm/opto/parseHelper.cpp
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped())  return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
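// Example: a variable statically typed Object[] may at runtime reference an
// Integer[]; storing a String through it must raise ArrayStoreException,
// which the element-klass subtype check (gen_checkcast below) enforces.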
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
      // Regarding the fourth condition in the if-statement from above:
      //
      // If the compiler has determined that the type of array 'ary' (represented
      // by 'array_klass') is java/lang/Object, the compiler must not assume that
      // the array 'ary' is monomorphic.
      //
      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
      // because it is not possible to perform an arraystore into an object that is not
      // a "proper" array.
      //
      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
      // successfully perform the store.
      //
      // The implementation reasons for the condition are the following:
      //
      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
      //
      // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast(obj, a_e_klass);
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                      map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new (C) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new (C) BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr   = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2-based) tiered system we must also execute the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
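      // OSR threshold; e.g. with the typical server defaults CompileThreshold=10000,
      // OnStackReplacePercentage=140 and InterpreterProfilePercentage=33 this is
      // 10000 * (140 - 33) / 100 = 10700, mirroring the interpreter's calculation.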
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Check whether the target_bci is already in the table
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // The target_bci is already in the table
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}
C:\hotspot-69087d08d473\src\share\vm/opto/phase.cpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "opto/compile.hpp"
#include "opto/matcher.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"

#ifndef PRODUCT
int Phase::_total_bytes_compiled = 0;

elapsedTimer Phase::_t_totalCompilation;
elapsedTimer Phase::_t_methodCompilation;
elapsedTimer Phase::_t_stubCompilation;
#endif

// The following timers are used for LogCompilation
elapsedTimer Phase::_t_parser;
elapsedTimer Phase::_t_optimizer;
elapsedTimer   Phase::_t_escapeAnalysis;
elapsedTimer     Phase::_t_connectionGraph;
elapsedTimer   Phase::_t_idealLoop;
elapsedTimer   Phase::_t_ccp;
elapsedTimer Phase::_t_matcher;
elapsedTimer Phase::_t_registerAllocation;
elapsedTimer Phase::_t_output;

#ifndef PRODUCT
elapsedTimer Phase::_t_graphReshaping;
elapsedTimer Phase::_t_scheduler;
elapsedTimer Phase::_t_blockOrdering;
elapsedTimer Phase::_t_macroEliminate;
elapsedTimer Phase::_t_macroExpand;
elapsedTimer Phase::_t_peephole;
elapsedTimer Phase::_t_postalloc_expand;
elapsedTimer Phase::_t_codeGeneration;
elapsedTimer Phase::_t_registerMethod;
elapsedTimer Phase::_t_temporaryTimer1;
elapsedTimer Phase::_t_temporaryTimer2;
elapsedTimer Phase::_t_idealLoopVerify;

// Subtimers for _t_optimizer
elapsedTimer   Phase::_t_iterGVN;
elapsedTimer   Phase::_t_iterGVN2;
elapsedTimer   Phase::_t_incrInline;
elapsedTimer   Phase::_t_renumberLive;


// Subtimers for _t_registerAllocation
elapsedTimer   Phase::_t_ctorChaitin;
elapsedTimer   Phase::_t_buildIFGphysical;
elapsedTimer   Phase::_t_computeLive;
elapsedTimer   Phase::_t_regAllocSplit;
elapsedTimer   Phase::_t_postAllocCopyRemoval;
elapsedTimer   Phase::_t_mergeMultidefs;
elapsedTimer   Phase::_t_fixupSpills;

// Subtimers for _t_output
elapsedTimer   Phase::_t_instrSched;
elapsedTimer   Phase::_t_buildOopMaps;
#endif

//------------------------------Phase------------------------------------------
Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) {
  // Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544).
  // This is an effective place to poll, since the compiler is full of phases.
  // In particular, every inlining site uses a recursively created Parse phase.
  CompileBroker::maybe_block();
}

#ifndef PRODUCT
static const double minimum_reported_time             = 0.0001; // seconds
static const double expected_method_compile_coverage  = 0.97;   // %
static const double minimum_meaningful_method_compile = 2.00;   // seconds

void Phase::print_timers() {
  tty->print_cr ("Accumulated compiler times:");
  tty->print_cr ("---------------------------");
  tty->print_cr ("  Total compilation: %3.3f sec.", Phase::_t_totalCompilation.seconds());
  tty->print    ("    method compilation   : %3.3f sec", Phase::_t_methodCompilation.seconds());
  tty->print    ("/%d bytes",_total_bytes_compiled);
  tty->print_cr (" (%3.0f bytes per sec) ", Phase::_total_bytes_compiled / Phase::_t_methodCompilation.seconds());
  tty->print_cr ("    stub compilation     : %3.3f sec.", Phase::_t_stubCompilation.seconds());
  tty->print_cr ("  Phases:");
  tty->print_cr ("    parse          : %3.3f sec", Phase::_t_parser.seconds());
  tty->print_cr ("    optimizer      : %3.3f sec", Phase::_t_optimizer.seconds());
  if( Verbose || WizardMode ) {
    if (DoEscapeAnalysis) {
      // EA is part of Optimizer.
      tty->print_cr ("      escape analysis: %3.3f sec", Phase::_t_escapeAnalysis.seconds());
      tty->print_cr ("        connection graph: %3.3f sec", Phase::_t_connectionGraph.seconds());
      tty->print_cr ("      macroEliminate : %3.3f sec", Phase::_t_macroEliminate.seconds());
    }
    tty->print_cr ("      iterGVN        : %3.3f sec", Phase::_t_iterGVN.seconds());
    tty->print_cr ("      incrInline     : %3.3f sec", Phase::_t_incrInline.seconds());
    tty->print_cr ("      renumberLive   : %3.3f sec", Phase::_t_renumberLive.seconds());
    tty->print_cr ("      idealLoop      : %3.3f sec", Phase::_t_idealLoop.seconds());
    tty->print_cr ("      idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
    tty->print_cr ("      ccp            : %3.3f sec", Phase::_t_ccp.seconds());
    tty->print_cr ("      iterGVN2       : %3.3f sec", Phase::_t_iterGVN2.seconds());
    tty->print_cr ("      macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
    tty->print_cr ("      graphReshape   : %3.3f sec", Phase::_t_graphReshaping.seconds());
    double optimizer_subtotal = Phase::_t_iterGVN.seconds() + Phase::_t_iterGVN2.seconds() + Phase::_t_renumberLive.seconds() +
      Phase::_t_escapeAnalysis.seconds() + Phase::_t_macroEliminate.seconds() +
      Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
      Phase::_t_macroExpand.seconds() + Phase::_t_graphReshaping.seconds();
    double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
    tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
  }
  tty->print_cr ("    matcher        : %3.3f sec", Phase::_t_matcher.seconds());
  tty->print_cr ("    scheduler      : %3.3f sec", Phase::_t_scheduler.seconds());
  tty->print_cr ("    regalloc       : %3.3f sec", Phase::_t_registerAllocation.seconds());
  if( Verbose || WizardMode ) {
    tty->print_cr ("      ctorChaitin    : %3.3f sec", Phase::_t_ctorChaitin.seconds());
    tty->print_cr ("      buildIFG       : %3.3f sec", Phase::_t_buildIFGphysical.seconds());
    tty->print_cr ("      computeLive    : %3.3f sec", Phase::_t_computeLive.seconds());
    tty->print_cr ("      regAllocSplit  : %3.3f sec", Phase::_t_regAllocSplit.seconds());
    tty->print_cr ("      postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds());
    tty->print_cr ("      mergeMultidefs: %3.3f sec", Phase::_t_mergeMultidefs.seconds());
    tty->print_cr ("      fixupSpills    : %3.3f sec", Phase::_t_fixupSpills.seconds());
    double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() +
      Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() +
      Phase::_t_regAllocSplit.seconds()    + Phase::_t_fixupSpills.seconds() +
      Phase::_t_postAllocCopyRemoval.seconds() + Phase::_t_mergeMultidefs.seconds();
    double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
    tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", regalloc_subtotal, percent_of_regalloc);
  }
  tty->print_cr ("    blockOrdering  : %3.3f sec", Phase::_t_blockOrdering.seconds());
  tty->print_cr ("    peephole       : %3.3f sec", Phase::_t_peephole.seconds());
  if (Matcher::require_postalloc_expand) {
    tty->print_cr ("    postalloc_expand: %3.3f sec", Phase::_t_postalloc_expand.seconds());
  }
  tty->print_cr ("    codeGen        : %3.3f sec", Phase::_t_codeGeneration.seconds());
  tty->print_cr ("    install_code   : %3.3f sec", Phase::_t_registerMethod.seconds());
  tty->print_cr ("    -------------- : ----------");
  double phase_subtotal = Phase::_t_parser.seconds() +
    Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() +
    Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() +
    Phase::_t_registerAllocation.seconds() + Phase::_t_blockOrdering.seconds() +
    Phase::_t_codeGeneration.seconds() + Phase::_t_registerMethod.seconds();
  double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0;
  // counters inside Compile::CodeGen include time for adapters and stubs
  // so phase-total can be greater than 100%
  tty->print_cr ("    total          : %3.3f sec,  %3.2f %%", phase_subtotal, percent_of_method_compile);

  assert( percent_of_method_compile > expected_method_compile_coverage ||
          phase_subtotal < minimum_meaningful_method_compile,
          "Must account for method compilation");

  if( Phase::_t_temporaryTimer1.seconds() > minimum_reported_time ) {
    tty->cr();
    tty->print_cr ("    temporaryTimer1: %3.3f sec", Phase::_t_temporaryTimer1.seconds());
  }
  if( Phase::_t_temporaryTimer2.seconds() > minimum_reported_time ) {
    tty->cr();
    tty->print_cr ("    temporaryTimer2: %3.3f sec", Phase::_t_temporaryTimer2.seconds());
  }
  tty->print_cr ("    output         : %3.3f sec", Phase::_t_output.seconds());
  tty->print_cr ("      isched         : %3.3f sec", Phase::_t_instrSched.seconds());
  tty->print_cr ("      bldOopMaps     : %3.3f sec", Phase::_t_buildOopMaps.seconds());
}
#endif
C:\hotspot-69087d08d473\src\share\vm/opto/phase.hpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PHASE_HPP
#define SHARE_VM_OPTO_PHASE_HPP

#include "libadt/port.hpp"
#include "runtime/timer.hpp"

class Compile;

//------------------------------Phase------------------------------------------
// Most optimizations are done in Phases.  Creating a phase does any long
// running analysis required, and caches the analysis in internal data
// structures.  Later the analysis is queried using transform() calls to
// guide transforming the program.  When the Phase is deleted, so is any
// cached analysis info.  This basic Phase class mostly contains timing and
// memory management code.
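// Illustrative usage sketch (hypothetical call site; real ones pass
// compile-specific arguments): a concrete phase is stack-allocated, its
// constructor runs the analysis, and it is queried and destroyed in one scope:
//   { PhaseRemoveUseless pru(gvn, worklist, Phase::Remove_Useless); ... }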
class Phase : public StackObj {
public:
  enum PhaseNumber {
    Compiler,                         // Top-level compiler phase
    Parser,                           // Parse bytecodes
    Remove_Useless,                   // Remove useless nodes
    Remove_Useless_And_Renumber_Live, // First, remove useless nodes from the graph. Then, renumber live nodes.
    Optimistic,                       // Optimistic analysis phase
    GVN,                              // Pessimistic global value numbering phase
    Ins_Select,                       // Instruction selection phase
    CFG,                              // Build a CFG
    BlockLayout,                      // Linear ordering of blocks
    Register_Allocation,              // Register allocation, duh
    LIVE,                             // Dragon-book LIVE range problem
    StringOpts,                       // StringBuilder related optimizations
    Interference_Graph,               // Building the IFG
    Coalesce,                         // Coalescing copies
    Ideal_Loop,                       // Find idealized trip-counted loops
    Macro_Expand,                     // Expand macro nodes
    Peephole,                         // Apply peephole optimizations
    last_phase
  };
protected:
  enum PhaseNumber _pnum;       // Phase number (for stat gathering)

#ifndef PRODUCT
  static int _total_bytes_compiled;

  // accumulated timers
  static elapsedTimer _t_totalCompilation;
  static elapsedTimer _t_methodCompilation;
  static elapsedTimer _t_stubCompilation;
#endif

// The following timers are used for LogCompilation
  static elapsedTimer _t_parser;
  static elapsedTimer _t_optimizer;
public:
  // ConnectionGraph can't be a Phase since it is used after EA is done.
  static elapsedTimer   _t_escapeAnalysis;
  static elapsedTimer     _t_connectionGraph;
protected:
  static elapsedTimer   _t_idealLoop;
  static elapsedTimer   _t_ccp;
  static elapsedTimer _t_matcher;
  static elapsedTimer _t_registerAllocation;
  static elapsedTimer _t_output;

#ifndef PRODUCT
  static elapsedTimer _t_graphReshaping;
  static elapsedTimer _t_scheduler;
  static elapsedTimer _t_blockOrdering;
  static elapsedTimer _t_macroEliminate;
  static elapsedTimer _t_macroExpand;
  static elapsedTimer _t_peephole;
  static elapsedTimer _t_postalloc_expand;
  static elapsedTimer _t_codeGeneration;
  static elapsedTimer _t_registerMethod;
  static elapsedTimer _t_temporaryTimer1;
  static elapsedTimer _t_temporaryTimer2;
  static elapsedTimer _t_idealLoopVerify;

// Subtimers for _t_optimizer
  static elapsedTimer   _t_iterGVN;
  static elapsedTimer   _t_iterGVN2;
  static elapsedTimer   _t_incrInline;
  static elapsedTimer   _t_renumberLive;

// Subtimers for _t_registerAllocation
  static elapsedTimer   _t_ctorChaitin;
  static elapsedTimer   _t_buildIFGphysical;
  static elapsedTimer   _t_computeLive;
  static elapsedTimer   _t_regAllocSplit;
  static elapsedTimer   _t_postAllocCopyRemoval;
  static elapsedTimer   _t_mergeMultidefs;
  static elapsedTimer   _t_fixupSpills;

// Subtimers for _t_output
  static elapsedTimer   _t_instrSched;
  static elapsedTimer   _t_buildOopMaps;
#endif
public:
  Compile * C;
  Phase( PhaseNumber pnum );
#ifndef PRODUCT
  static void print_timers();
#endif
};

#endif // SHARE_VM_OPTO_PHASE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/phasetype.hpp
/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PHASETYPE_HPP
#define SHARE_VM_OPTO_PHASETYPE_HPP

enum CompilerPhaseType {
  PHASE_BEFORE_STRINGOPTS,
  PHASE_AFTER_STRINGOPTS,
  PHASE_BEFORE_REMOVEUSELESS,
  PHASE_AFTER_PARSING,
  PHASE_ITER_GVN1,
  PHASE_PHASEIDEAL_BEFORE_EA,
  PHASE_ITER_GVN_AFTER_EA,
  PHASE_ITER_GVN_AFTER_ELIMINATION,
  PHASE_PHASEIDEALLOOP1,
  PHASE_PHASEIDEALLOOP2,
  PHASE_PHASEIDEALLOOP3,
  PHASE_CPP1,
  PHASE_ITER_GVN2,
  PHASE_PHASEIDEALLOOP_ITERATIONS,
  PHASE_OPTIMIZE_FINISHED,
  PHASE_GLOBAL_CODE_MOTION,
  PHASE_FINAL_CODE,
  PHASE_AFTER_EA,
  PHASE_BEFORE_CLOOPS,
  PHASE_AFTER_CLOOPS,
  PHASE_BEFORE_BEAUTIFY_LOOPS,
  PHASE_AFTER_BEAUTIFY_LOOPS,
  PHASE_BEFORE_MATCHING,
  PHASE_INCREMENTAL_INLINE,
  PHASE_INCREMENTAL_BOXING_INLINE,
  PHASE_END,
  PHASE_FAILURE,

  PHASE_NUM_TYPES
};

class CompilerPhaseTypeHelper {
  public:
  static const char* to_string(CompilerPhaseType cpt) {
    switch (cpt) {
      case PHASE_BEFORE_STRINGOPTS:          return "Before StringOpts";
      case PHASE_AFTER_STRINGOPTS:           return "After StringOpts";
      case PHASE_BEFORE_REMOVEUSELESS:       return "Before RemoveUseless";
      case PHASE_AFTER_PARSING:              return "After Parsing";
      case PHASE_ITER_GVN1:                  return "Iter GVN 1";
      case PHASE_PHASEIDEAL_BEFORE_EA:       return "PhaseIdealLoop before EA";
      case PHASE_ITER_GVN_AFTER_EA:          return "Iter GVN after EA";
      case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks";
      case PHASE_PHASEIDEALLOOP1:            return "PhaseIdealLoop 1";
      case PHASE_PHASEIDEALLOOP2:            return "PhaseIdealLoop 2";
      case PHASE_PHASEIDEALLOOP3:            return "PhaseIdealLoop 3";
      case PHASE_CPP1:                       return "PhaseCPP 1";
      case PHASE_ITER_GVN2:                  return "Iter GVN 2";
      case PHASE_PHASEIDEALLOOP_ITERATIONS:  return "PhaseIdealLoop iterations";
      case PHASE_OPTIMIZE_FINISHED:          return "Optimize finished";
      case PHASE_GLOBAL_CODE_MOTION:         return "Global code motion";
      case PHASE_FINAL_CODE:                 return "Final Code";
      case PHASE_AFTER_EA:                   return "After Escape Analysis";
      case PHASE_BEFORE_CLOOPS:              return "Before CountedLoop";
      case PHASE_AFTER_CLOOPS:               return "After CountedLoop";
      case PHASE_BEFORE_BEAUTIFY_LOOPS:      return "Before beautify loops";
      case PHASE_AFTER_BEAUTIFY_LOOPS:       return "After beautify loops";
      case PHASE_BEFORE_MATCHING:            return "Before Matching";
      case PHASE_INCREMENTAL_INLINE:         return "Incremental Inline";
      case PHASE_INCREMENTAL_BOXING_INLINE:  return "Incremental Boxing Inline";
      case PHASE_END:                        return "End";
      case PHASE_FAILURE:                    return "Failure";
      default:
        ShouldNotReachHere();
        return NULL;
    }
  }
};

#endif //SHARE_VM_OPTO_PHASETYPE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/phaseX.cpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"

//=============================================================================
#define NODE_HASH_MINIMUM_SIZE    255
//------------------------------NodeHash---------------------------------------
NodeHash::NodeHash(uint est_max_size) :
  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  _a(Thread::current()->resource_area()),
  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
  _inserts(0), _insert_limit( insert_limit() ),
  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  _total_insert_probes(0), _total_inserts(0),
  _insert_probes(0), _grows(0) {
  // _sentinel must be in the current node space
  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  memset(_table,0,sizeof(Node*)*_max);
}

//------------------------------NodeHash---------------------------------------
NodeHash::NodeHash(Arena *arena, uint est_max_size) :
  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
  _a(arena),
  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ),
  _inserts(0), _insert_limit( insert_limit() ),
  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
  _delete_probes(0), _delete_hits(0), _delete_misses(0),
  _total_insert_probes(0), _total_inserts(0),
  _insert_probes(0), _grows(0) {
  // _sentinel must be in the current node space
  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
  memset(_table,0,sizeof(Node*)*_max);
}

//------------------------------NodeHash---------------------------------------
NodeHash::NodeHash(NodeHash *nh) {
  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
  // just copy in all the fields
  *this = *nh;
  // nh->_sentinel must be in the current node space
}

void NodeHash::replace_with(NodeHash *nh) {
  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
  // just copy in all the fields
  *this = *nh;
  // nh->_sentinel must be in the current node space
}

//------------------------------hash_find--------------------------------------
// Find in hash table
Node *NodeHash::hash_find( const Node *n ) {
  // ((Node*)n)->set_hash( n->hash() );
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    debug_only( _lookup_misses++ );
    return NULL;
  }
  uint key = hash & (_max-1);
  uint stride = key | 0x01;
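  // _max is a power of two, so any odd stride is relatively prime to it and
  // the probe sequence key, key+stride, key+2*stride, ... (mod _max) visits
  // every slot before repeating.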
  debug_only( _look_probes++ );
  Node *k = _table[key];        // Get hashed value
  if( !k ) {                    // ?Miss?
    debug_only( _lookup_misses++ );
    return NULL;                // Miss!
  }

  int op = n->Opcode();
  uint req = n->req();
  while( 1 ) {                  // While probing hash table
    if( k->req() == req &&      // Same count of inputs
        k->Opcode() == op ) {   // Same Opcode
      for( uint i=0; i<req; i++ )
        if( n->in(i)!=k->in(i)) // Different inputs?
          goto collision;       // "goto" is a speed hack...
      if( n->cmp(*k) ) {        // Check for any special bits
        debug_only( _lookup_hits++ );
        return k;               // Hit!
      }
    }
  collision:
    debug_only( _look_probes++ );
    key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime
    k = _table[key];            // Get hashed value
    if( !k ) {                  // ?Miss?
      debug_only( _lookup_misses++ );
      return NULL;              // Miss!
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//------------------------------hash_find_insert-------------------------------
// Find in hash table, insert if not already present
// Used to preserve unique entries in hash table
Node *NodeHash::hash_find_insert( Node *n ) {
  // n->set_hash( );
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    debug_only( _lookup_misses++ );
    return NULL;
  }
  uint key = hash & (_max-1);
  uint stride = key | 0x01;     // stride must be relatively prime to table size
  uint first_sentinel = 0;      // replace a sentinel if seen.
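  // (Slot index 0 doubles as "no sentinel seen yet", so a sentinel found in
  // slot 0 is not recorded for reuse; that only wastes the slot and does not
  // affect correctness.)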
  debug_only( _look_probes++ );
  Node *k = _table[key];        // Get hashed value
  if( !k ) {                    // ?Miss?
    debug_only( _lookup_misses++ );
    _table[key] = n;            // Insert into table!
    debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
    check_grow();               // Grow table if insert hit limit
    return NULL;                // Miss!
  }
  else if( k == _sentinel ) {
    first_sentinel = key;      // Can insert here
  }

  int op = n->Opcode();
  uint req = n->req();
  while( 1 ) {                  // While probing hash table
    if( k->req() == req &&      // Same count of inputs
        k->Opcode() == op ) {   // Same Opcode
      for( uint i=0; i<req; i++ )
        if( n->in(i)!=k->in(i)) // Different inputs?
          goto collision;       // "goto" is a speed hack...
      if( n->cmp(*k) ) {        // Check for any special bits
        debug_only( _lookup_hits++ );
        return k;               // Hit!
      }
    }
  collision:
    debug_only( _look_probes++ );
    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
    k = _table[key];            // Get hashed value
    if( !k ) {                  // ?Miss?
      debug_only( _lookup_misses++ );
      key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
      _table[key] = n;          // Insert into table!
      debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
      check_grow();             // Grow table if insert hit limit
      return NULL;              // Miss!
    }
    else if( first_sentinel == 0 && k == _sentinel ) {
      first_sentinel = key;    // Can insert here
    }

  }
  ShouldNotReachHere();
  return NULL;
}

//------------------------------hash_insert------------------------------------
// Insert into hash table
void NodeHash::hash_insert( Node *n ) {
  // // "conflict" comments -- print nodes that conflict
  // bool conflict = false;
  // n->set_hash();
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    return;
  }
  check_grow();
  uint key = hash & (_max-1);
  uint stride = key | 0x01;

  while( 1 ) {                  // While probing hash table
    debug_only( _insert_probes++ );
    Node *k = _table[key];      // Get hashed value
    if( !k || (k == _sentinel) ) break;       // Found a slot
    assert( k != n, "already inserted" );
    // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print("  conflict: "); k->dump(); conflict = true; }
    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
  }
  _table[key] = n;              // Insert into table!
  debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
  // if( conflict ) { n->dump(); }
}

//------------------------------hash_delete------------------------------------
// Replace in hash table with sentinel
bool NodeHash::hash_delete( const Node *n ) {
  Node *k;
  uint hash = n->hash();
  if (hash == Node::NO_HASH) {
    debug_only( _delete_misses++ );
    return false;
  }
  uint key = hash & (_max-1);
  uint stride = key | 0x01;
  debug_only( uint counter = 0; );
  for( ; /* (k != NULL) && (k != _sentinel) */; ) {
    debug_only( counter++ );
    debug_only( _delete_probes++ );
    k = _table[key];            // Get hashed value
    if( !k ) {                  // Miss?
      debug_only( _delete_misses++ );
#ifdef ASSERT
      if( VerifyOpto ) {
        for( uint i=0; i < _max; i++ )
          assert( _table[i] != n, "changed edges with rehashing" );
      }
#endif
      return false;             // Miss! Not in chain
    }
    else if( n == k ) {
      debug_only( _delete_hits++ );
      _table[key] = _sentinel;  // Hit! Label as deleted entry
      debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
      return true;
    }
    else {
      // collision: move through table with prime offset
      key = (key + stride/*7*/) & (_max-1);
      assert( counter <= _insert_limit, "Cycle in hash-table");
    }
  }
  ShouldNotReachHere();
  return false;
}

//------------------------------round_up---------------------------------------
// Round up to nearest power of 2
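// Worked example: round_up(255) first adds 25% slop (255 + 63 = 318) and then
// doubles 16 upward past 318, returning 512.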
uint NodeHash::round_up( uint x ) {
  x += (x>>2);                  // Add 25% slop
  if( x <16 ) return 16;        // Small stuff
  uint i=16;
  while( i < x ) i <<= 1;       // Double to fit
  return i;                     // Return hash table size
}

//------------------------------grow-------------------------------------------
// Grow _table to next power of 2 and insert old entries
void  NodeHash::grow() {
  // Record old state
  uint   old_max   = _max;
  Node **old_table = _table;
  // Construct new table with twice the space
  _grows++;
  _total_inserts       += _inserts;
  _total_insert_probes += _insert_probes;
  _inserts         = 0;
  _insert_probes   = 0;
  _max     = _max << 1;
  _table   = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) );
  memset(_table,0,sizeof(Node*)*_max);
  _insert_limit = insert_limit();
  // Insert old entries into the new table
  for( uint i = 0; i < old_max; i++ ) {
    Node *m = *old_table++;
    if( !m || m == _sentinel ) continue;
    debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table.
    hash_insert(m);
  }
}

//------------------------------clear------------------------------------------
// Clear all entries in _table to NULL but keep storage
void  NodeHash::clear() {
#ifdef ASSERT
  // Unlock all nodes upon removal from table.
  for (uint i = 0; i < _max; i++) {
    Node* n = _table[i];
    if (!n || n == _sentinel)  continue;
    n->exit_hash_lock();
  }
#endif

  memset( _table, 0, _max * sizeof(Node*) );
}

//-----------------------remove_useless_nodes----------------------------------
// Remove useless nodes from the value table;
// the implementation does not depend on the hash function.
void NodeHash::remove_useless_nodes(VectorSet &useful) {

  // Dead nodes in the hash table inherited from GVN should not replace
  // existing nodes; remove the dead nodes.
  uint max = size();
  Node *sentinel_node = sentinel();
  for( uint i = 0; i < max; ++i ) {
    Node *n = at(i);
    if(n != NULL && n != sentinel_node && !useful.test(n->_idx)) {
      debug_only(n->exit_hash_lock()); // Unlock the node when removed
      _table[i] = sentinel_node;       // Replace with placeholder
    }
  }
}


void NodeHash::check_no_speculative_types() {
#ifdef ASSERT
  uint max = size();
  Node *sentinel_node = sentinel();
  for (uint i = 0; i < max; ++i) {
    Node *n = at(i);
    if(n != NULL && n != sentinel_node && n->is_Type()) {
      TypeNode* tn = n->as_Type();
      const Type* t = tn->type();
      const Type* t_no_spec = t->remove_speculative();
      assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
    }
  }
#endif
}

#ifndef PRODUCT
//------------------------------dump-------------------------------------------
// Dump statistics for the hash table
void NodeHash::dump() {
  _total_inserts       += _inserts;
  _total_insert_probes += _insert_probes;
  if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) {
    if (WizardMode) {
      for (uint i=0; i<_max; i++) {
        if (_table[i])
          tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
      }
    }
    tty->print("\nGVN Hash stats:  %d grows to %d max_size\n", _grows, _max);
    tty->print("  %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0);
    tty->print("  %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses));
    tty->print("  %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts);
    // sentinels increase lookup cost, but not insert cost
    assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function");
    assert( _inserts+(_inserts>>3) < _max, "table too full" );
    assert( _inserts*3+100 >= _insert_probes, "bad hash function" );
  }
}

Node *NodeHash::find_index(uint idx) { // For debugging
  // Find an entry by its index value
  for( uint i = 0; i < _max; i++ ) {
    Node *m = _table[i];
    if( !m || m == _sentinel ) continue;
    if( m->_idx == (uint)idx ) return m;
  }
  return NULL;
}
#endif

#ifdef ASSERT
NodeHash::~NodeHash() {
  // Unlock all nodes upon destruction of table.
  if (_table != (Node**)badAddress)  clear();
}

void NodeHash::operator=(const NodeHash& nh) {
  // Unlock all nodes upon replacement of table.
  if (&nh == this)  return;
  if (_table != (Node**)badAddress)  clear();
  memcpy(this, &nh, sizeof(*this));
  // Do not increment hash_lock counts again.
  // Instead, be sure we never again use the source table.
  ((NodeHash*)&nh)->_table = (Node**)badAddress;
}


#endif


//=============================================================================
//------------------------------PhaseRemoveUseless-----------------------------
// 1) Use a breadth-first walk to collect useful nodes reachable from root.
PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist, PhaseNumber phase_num) : Phase(phase_num),
  _useful(Thread::current()->resource_area()) {

  // Implementation requires 'UseLoopSafepoints == true' and an edge from root
  // to each SafePointNode at a backward branch.  Inserted in add_safepoint().
  if( !UseLoopSafepoints || !OptoRemoveUseless ) return;

  // Identify nodes that are reachable from below, useful.
  C->identify_useful_nodes(_useful);
  // Update dead node list
  C->update_dead_node_list(_useful);

  // Remove all useless nodes from PhaseValues' recorded types
  // Must be done before disconnecting nodes to preserve the hash-table invariant
  gvn->remove_useless_nodes(_useful.member_set());

  // Remove all useless nodes from future worklist
  worklist->remove_useless_nodes(_useful.member_set());

  // Disconnect 'useless' nodes that are adjacent to useful nodes
  C->remove_useless_nodes(_useful);
}

//=============================================================================
//------------------------------PhaseRenumberLive------------------------------
// First, remove useless nodes (equivalent to identifying live nodes).
// Then, renumber live nodes.
//
// The set of live nodes is returned by PhaseRemoveUseless in the _useful structure.
// If the number of live nodes is 'x' (where 'x' == _useful.size()), then the
// PhaseRenumberLive updates the node ID of each node (the _idx field) with a unique
// value in the range [0, x).
//
// At the end of the PhaseRenumberLive phase, the compiler's count of unique nodes is
// updated to 'x' and the list of dead nodes is reset (as there are no dead nodes).
//
// The PhaseRenumberLive phase updates two data structures with the new node IDs.
// (1) The worklist is used by the PhaseIterGVN phase to identify nodes that must be
// processed. A new worklist (with the updated node IDs) is returned in 'new_worklist'.
// (2) Type information (the field PhaseGVN::_types) maps type information to each
// node ID. The mapping is updated to use the new node IDs as well. Updated type
// information is returned in PhaseGVN::_types.
//
// The PhaseRenumberLive phase does not preserve the order of elements in the worklist.
//
// Other data structures used by the compiler are not updated. The hash table for value
// numbering (the field PhaseGVN::_table) is not updated because computing the hash
// values is not based on node IDs. The field PhaseGVN::_nodes is not updated either
// because it is empty wherever PhaseRenumberLive is used.
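//
// For illustration: if a compile has allocated 100 node IDs but only 42 nodes
// are live, then after this phase those 42 nodes carry _idx values 0..41,
// C->unique() is 42, and the dead node list is empty.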
PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
                                     Unique_Node_List* worklist, Unique_Node_List* new_worklist,
                                     PhaseNumber phase_num) :
  PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live) {

  assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
  assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
  assert(gvn->nodes_size() == 0, "GVN must not contain any nodes at this point");

  uint old_unique_count = C->unique();
  uint live_node_count = C->live_nodes();
  uint worklist_size = worklist->size();

  // Storage for the updated type information.
  Type_Array new_type_array(C->comp_arena());

  // Iterate over the set of live nodes.
  uint current_idx = 0; // The current new node ID. Incremented after every assignment.
  for (uint i = 0; i < _useful.size(); i++) {
    Node* n = _useful.at(i);
    // Sanity check that fails if we ever decide to execute this phase after EA
    assert(!n->is_Phi() || n->as_Phi()->inst_mem_id() == -1, "should not be linked to data Phi");
    const Type* type = gvn->type_or_null(n);
    new_type_array.map(current_idx, type);

    bool in_worklist = false;
    if (worklist->member(n)) {
      in_worklist = true;
    }

    n->set_idx(current_idx); // Update node ID.

    if (in_worklist) {
      new_worklist->push(n);
    }

    current_idx++;
  }

  assert(worklist_size == new_worklist->size(), "the new worklist must have the same size as the original worklist");
  assert(live_node_count == current_idx, "all live nodes must be processed");

  // Replace the compiler's type information with the updated type information.
  gvn->replace_types(new_type_array);

  // Update the unique node count of the compilation to the number of currently live nodes.
  C->set_unique(live_node_count);

  // Set the dead node count to 0 and reset dead node list.
  C->reset_dead_node_list();
}


//=============================================================================
//------------------------------PhaseTransform---------------------------------
PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
  _arena(Thread::current()->resource_area()),
  _nodes(_arena),
  _types(_arena)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}

//------------------------------PhaseTransform---------------------------------
PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
  _arena(arena),
  _nodes(arena),
  _types(arena)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
  // Force allocation for currently existing nodes
  _types.map(C->unique(), NULL);
}

//------------------------------PhaseTransform---------------------------------
// Initialize with previously generated type information
PhaseTransform::PhaseTransform( PhaseTransform *pt, PhaseNumber pnum ) : Phase(pnum),
  _arena(pt->_arena),
  _nodes(pt->_nodes),
  _types(pt->_types)
{
  init_con_caches();
#ifndef PRODUCT
  clear_progress();
  clear_transforms();
  set_allow_progress(true);
#endif
}

void PhaseTransform::init_con_caches() {
  memset(_icons,0,sizeof(_icons));
  memset(_lcons,0,sizeof(_lcons));
  memset(_zcons,0,sizeof(_zcons));
}


//--------------------------------find_int_type--------------------------------
const TypeInt* PhaseTransform::find_int_type(Node* n) {
  if (n == NULL)  return NULL;
  // Call type_or_null(n) to determine the node's type, since we might be in
  // the parse phase and calling n->Value() may return the wrong type.
  // (For example, a phi node at the beginning of loop parsing is not ready.)
  const Type* t = type_or_null(n);
  if (t == NULL)  return NULL;
  return t->isa_int();
}


//-------------------------------find_long_type--------------------------------
const TypeLong* PhaseTransform::find_long_type(Node* n) {
  if (n == NULL)  return NULL;
  // (See comment above on type_or_null.)
  const Type* t = type_or_null(n);
  if (t == NULL)  return NULL;
  return t->isa_long();
}


#ifndef PRODUCT
void PhaseTransform::dump_old2new_map() const {
  _nodes.dump();
}

void PhaseTransform::dump_new( uint nidx ) const {
  for( uint i=0; i<_nodes.Size(); i++ )
    if( _nodes[i] && _nodes[i]->_idx == nidx ) {
      _nodes[i]->dump();
      tty->cr();
      tty->print_cr("Old index= %d",i);
      return;
    }
  tty->print_cr("Node %d not found in the new indices", nidx);
}

//------------------------------dump_types-------------------------------------
void PhaseTransform::dump_types( ) const {
  _types.dump();
}

//------------------------------dump_nodes_and_types---------------------------
void PhaseTransform::dump_nodes_and_types(const Node *root, uint depth, bool only_ctrl) {
  VectorSet visited(Thread::current()->resource_area());
  dump_nodes_and_types_recur( root, depth, only_ctrl, visited );
}

//------------------------------dump_nodes_and_types_recur---------------------
void PhaseTransform::dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited) {
  if( !n ) return;
  if( depth == 0 ) return;
  if( visited.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && i != TypeFunc::Control ) continue;
    dump_nodes_and_types_recur( n->in(i), depth-1, only_ctrl, visited );
  }
  n->dump();
  if (type_or_null(n) != NULL) {
    tty->print("      "); type(n)->dump(); tty->cr();
  }
}

#endif


//=============================================================================
//------------------------------PhaseValues------------------------------------
// Set minimum table size to "255"
PhaseValues::PhaseValues( Arena *arena, uint est_max_size ) : PhaseTransform(arena, GVN), _table(arena, est_max_size) {
  NOT_PRODUCT( clear_new_values(); )
}

//------------------------------PhaseValues------------------------------------
// Set minimum table size to "255"
PhaseValues::PhaseValues( PhaseValues *ptv ) : PhaseTransform( ptv, GVN ),
  _table(&ptv->_table) {
  NOT_PRODUCT( clear_new_values(); )
}

//------------------------------PhaseValues------------------------------------
// Used by +VerifyOpto.  Clear out hash table but copy _types array.
PhaseValues::PhaseValues( PhaseValues *ptv, const char *dummy ) : PhaseTransform( ptv, GVN ),
  _table(ptv->arena(),ptv->_table.size()) {
  NOT_PRODUCT( clear_new_values(); )
}

//------------------------------~PhaseValues-----------------------------------
#ifndef PRODUCT
PhaseValues::~PhaseValues() {
  _table.dump();

  // Statistics for value progress and efficiency
  if( PrintCompilation && Verbose && WizardMode ) {
    tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
      is_IterGVN() ? "Iter" : "    ", C->unique(), made_progress(), made_transforms(), made_new_values());
    if( made_transforms() != 0 ) {
      tty->print_cr("  ratio %f", made_progress()/(float)made_transforms() );
    } else {
      tty->cr();
    }
  }
}
#endif

//------------------------------makecon----------------------------------------
ConNode* PhaseTransform::makecon(const Type *t) {
  assert(t->singleton(), "must be a constant");
  assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
  switch (t->base()) {  // fast paths
  case Type::Half:
  case Type::Top:  return (ConNode*) C->top();
  case Type::Int:  return intcon( t->is_int()->get_con() );
  case Type::Long: return longcon( t->is_long()->get_con() );
  }
  if (t->is_zero_type())
    return zerocon(t->basic_type());
  return uncached_makecon(t);
}

//--------------------------uncached_makecon-----------------------------------
// Make an idealized constant - one of ConINode, ConPNode, etc.
ConNode* PhaseValues::uncached_makecon(const Type *t) {
  assert(t->singleton(), "must be a constant");
  ConNode* x = ConNode::make(C, t);
  ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
  if (k == NULL) {
    set_type(x, t);             // Missed, provide type mapping
    GrowableArray<Node_Notes*>* nna = C->node_note_array();
    if (nna != NULL) {
      Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
      loc->clear(); // do not put debug info on constants
    }
  } else {
    x->destruct();              // Hit, destroy duplicate constant
    x = k;                      // use existing constant
  }
  return x;
}

//------------------------------intcon-----------------------------------------
// Fast integer constant.  Same as "transform(new ConINode(TypeInt::make(i)))"
ConINode* PhaseTransform::intcon(int i) {
  // Small integer?  Check cache! Check that cached node is not dead
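  // (A "dead" cached constant is one whose inputs, including the Control
  // input checked below, were cleared when the node was disconnected.)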
  if (i >= _icon_min && i <= _icon_max) {
    ConINode* icon = _icons[i-_icon_min];
    if (icon != NULL && icon->in(TypeFunc::Control) != NULL)
      return icon;
  }
  ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
  assert(icon->is_Con(), "");
  if (i >= _icon_min && i <= _icon_max)
    _icons[i-_icon_min] = icon;   // Cache small integers
  return icon;
}

//------------------------------longcon----------------------------------------
// Fast long constant.
ConLNode* PhaseTransform::longcon(jlong l) {
  // Small long?  Check cache! Check that cached node is not dead
  if (l >= _lcon_min && l <= _lcon_max) {
    ConLNode* lcon = _lcons[l-_lcon_min];
    if (lcon != NULL && lcon->in(TypeFunc::Control) != NULL)
      return lcon;
  }
  ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
  assert(lcon->is_Con(), "");
  if (l >= _lcon_min && l <= _lcon_max)
    _lcons[l-_lcon_min] = lcon;      // Cache small integers
  return lcon;
}

//------------------------------zerocon-----------------------------------------
// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
ConNode* PhaseTransform::zerocon(BasicType bt) {
  assert((uint)bt <= _zcon_max, "domain check");
  ConNode* zcon = _zcons[bt];
  if (zcon != NULL && zcon->in(TypeFunc::Control) != NULL)
    return zcon;
  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
  _zcons[bt] = zcon;
  return zcon;
}



//=============================================================================
//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but in a
// faster or cheaper fashion.
Node *PhaseGVN::transform( Node *n ) {
  return transform_no_reclaim(n);
}

//------------------------------transform--------------------------------------
// Return a node which computes the same function as this node, but
// in a faster or cheaper fashion.
Node *PhaseGVN::transform_no_reclaim( Node *n ) {
  NOT_PRODUCT( set_transforms(); )

  // Apply the Ideal call in a loop until it no longer applies
  Node *k = n;
  NOT_PRODUCT( uint loop_count = 0; )
  while( 1 ) {
    Node *i = k->Ideal(this, /*can_reshape=*/false);
    if( !i ) break;
    assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
    k = i;
    assert(loop_count++ < K, "infinite loop in PhaseGVN::transform");
  }
  NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } )


  // If brand new node, make space in type array.
  ensure_type_or_null(k);

  // Since I just called 'Value' to compute the set of run-time values
  // for this Node, and 'Value' is non-local (and therefore expensive) I'll
  // cache Value.  Later requests for the local phase->type of this Node can
  // use the cached Value instead of suffering with 'bottom_type'.
  const Type *t = k->Value(this); // Get runtime Value set
  assert(t != NULL, "value sanity");
  if (type_or_null(k) != t) {
#ifndef PRODUCT
    // Do not count initial visit to node as a transformation
    if (type_or_null(k) == NULL) {
      inc_new_values();
      set_progress();
    }
#endif
    set_type(k, t);
    // If k is a TypeNode, capture any more-precise type permanently into Node
    k->raise_bottom_type(t);
  }

  if( t->singleton() && !k->is_Con() ) {
    NOT_PRODUCT( set_progress(); )
    return makecon(t);          // Turn into a constant
  }

  // Now check for Identities
  Node *i = k->Identity(this);  // Look for a nearby replacement
  if( i != k ) {                // Found? Return replacement!
    NOT_PRODUCT( set_progress(); )
    return i;
  }

  // Global Value Numbering
  i = hash_find_insert(k);      // Insert if new
  if( i && (i != k) ) {
    // Return the pre-existing node
    NOT_PRODUCT( set_progress(); )
    return i;
  }

  // Return Idealized original
  return k;
}

#ifdef ASSERT
//------------------------------dead_loop_check--------------------------------
// Check for a simple dead loop when a data node references itself directly
// or through another data node, excluding cons and phis.
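// For example, it catches n->in(1) == n and n->in(1)->in(2) == n (a two-level
// self-reference), but deliberately not longer cycles.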
void PhaseGVN::dead_loop_check( Node *n ) {
  // Phi may reference itself in a loop
  if (n != NULL && !n->is_dead_loop_safe() && !n->is_CFG()) {
    // Do 2 levels check and only data inputs.
    bool no_dead_loop = true;
    uint cnt = n->req();
    for (uint i = 1; i < cnt && no_dead_loop; i++) {
      Node *in = n->in(i);
      if (in == n) {
        no_dead_loop = false;
      } else if (in != NULL && !in->is_dead_loop_safe()) {
        uint icnt = in->req();
        for (uint j = 1; j < icnt && no_dead_loop; j++) {
          if (in->in(j) == n || in->in(j) == in)
            no_dead_loop = false;
        }
      }
    }
    if (!no_dead_loop) n->dump(3);
    assert(no_dead_loop, "dead loop detected");
  }
}
#endif

//=============================================================================
//------------------------------PhaseIterGVN-----------------------------------
// Initialize hash table to fresh and clean for +VerifyOpto
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
                                                                      _stack(C->live_nodes() >> 1),
                                                                      _delay_transform(false) {
}

//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseIterGVN info; used by PhaseCCP
PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
                                                   _worklist( igvn->_worklist ),
                                                   _stack( igvn->_stack ),
                                                   _delay_transform(igvn->_delay_transform)
{
}

//------------------------------PhaseIterGVN-----------------------------------
// Initialize with previous PhaseGVN info from Parser
PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
                                              _worklist(*C->for_igvn()),
// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
//       the constructor is used in incremental inlining, this consumes too much memory:
//                                            _stack(C->live_nodes() >> 1),
//       So, as a band-aid, we replace this by:
                                              _stack(C->comp_arena(), 32),
                                              _delay_transform(false)
{
  uint max;

  // Dead nodes in the hash table inherited from GVN were not treated as
  // roots during def-use info creation; hence they represent an invisible
  // use.  Clear them out.
  max = _table.size();
  for( uint i = 0; i < max; ++i ) {
    Node *n = _table.at(i);
    if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
      if( n->is_top() ) continue;
      assert( false, "Parse::remove_useless_nodes missed this node");
      hash_delete(n);
    }
  }

  // Any Phis or Regions on the worklist probably had uses that could not
  // make more progress because the uses were made while the Phis and Regions
  // were in half-built states.  Put all uses of Phis and Regions on worklist.
  max = _worklist.size();
  for( uint j = 0; j < max; j++ ) {
    Node *n = _worklist.at(j);
    uint uop = n->Opcode();
    if( uop == Op_Phi || uop == Op_Region ||
        n->is_Type() ||
        n->is_Mem() )
      add_users_to_worklist(n);
  }
}


#ifndef PRODUCT
void PhaseIterGVN::verify_step(Node* n) {
  _verify_window[_verify_counter % _verify_window_size] = n;
  ++_verify_counter;
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  if (C->unique() < 1000 ||
      0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
    ++_verify_full_passes;
    Node::verify_recur(C->root(), -1, old_space, new_space);
  }
  const int verify_depth = 4;
  for ( int i = 0; i < _verify_window_size; i++ ) {
    Node* n = _verify_window[i];
    if ( n == NULL )  continue;
    if( n->in(0) == NodeSentinel ) {  // xform_idom
      _verify_window[i] = n->in(1);
      --i; continue;
    }
    // Typical fanout is 1-2, so this call visits about 6 nodes.
    Node::verify_recur(n, verify_depth, old_space, new_space);
  }
}
#endif


//------------------------------init_worklist----------------------------------
// Initialize worklist for each node.
void PhaseIterGVN::init_worklist( Node *n ) {
  if( _worklist.member(n) ) return;
  _worklist.push(n);
  uint cnt = n->req();
  for( uint i =0 ; i < cnt; i++ ) {
    Node *m = n->in(i);
    if( m ) init_worklist(m);
  }
}

//------------------------------optimize---------------------------------------
void PhaseIterGVN::optimize() {
  debug_only(uint num_processed  = 0;);
#ifndef PRODUCT
  {
    _verify_counter = 0;
    _verify_full_passes = 0;
    for ( int i = 0; i < _verify_window_size; i++ ) {
      _verify_window[i] = NULL;
    }
  }
#endif

#ifdef ASSERT
  Node* prev = NULL;
  uint rep_cnt = 0;
#endif
  uint loop_count = 0;

  // Pull from worklist; transform node;
  // If node has changed: update edge info and put uses on worklist.
  while( _worklist.size() ) {
    if (C->check_node_count(NodeLimitFudgeFactor * 2,
                            "out of nodes optimizing method")) {
      return;
    }
    Node *n  = _worklist.pop();
    if (++loop_count >= K * C->live_nodes()) {
      debug_only(n->dump(4);)
      assert(false, "infinite loop in PhaseIterGVN::optimize");
      C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
      return;
    }
#ifdef ASSERT
    if (n == prev) {
      if (++rep_cnt > 3) {
        n->dump(4);
        assert(false, "loop in Ideal transformation");
      }
    } else {
      rep_cnt = 0;
    }
    prev = n;
#endif
    if (TraceIterativeGVN && Verbose) {
      tty->print("  Pop ");
      NOT_PRODUCT( n->dump(); )
      debug_only(if( (num_processed++ % 100) == 0 ) _worklist.print_set();)
    }

    if (n->outcnt() != 0) {

#ifndef PRODUCT
      uint wlsize = _worklist.size();
      const Type* oldtype = type_or_null(n);
#endif //PRODUCT

      Node *nn = transform_old(n);

#ifndef PRODUCT
      if (TraceIterativeGVN) {
        const Type* newtype = type_or_null(n);
        if (nn != n) {
          // print old node
          tty->print("< ");
          if (oldtype != newtype && oldtype != NULL) {
            oldtype->dump();
          }
          do { tty->print("\t"); } while (tty->position() < 16);
          tty->print("<");
          n->dump();
        }
        if (oldtype != newtype || nn != n) {
          // print new node and/or new type
          if (oldtype == NULL) {
            tty->print("* ");
          } else if (nn != n) {
            tty->print("> ");
          } else {
            tty->print("= ");
          }
          if (newtype == NULL) {
            tty->print("null");
          } else {
            newtype->dump();
          }
          do { tty->print("\t"); } while (tty->position() < 16);
          nn->dump();
        }
        if (Verbose && wlsize < _worklist.size()) {
          tty->print("  Push {");
          while (wlsize != _worklist.size()) {
            Node* pushed = _worklist.at(wlsize++);
            tty->print(" %d", pushed->_idx);
          }
          tty->print_cr(" }");
        }
      }
      if( VerifyIterativeGVN && nn != n ) {
        verify_step((Node*) NULL);  // ignore n, it might be subsumed
      }
#endif
    } else if (!n->is_top()) {
      remove_dead_node(n);
    }
  }

#ifndef PRODUCT
  C->verify_graph_edges();
  if( VerifyOpto && allow_progress() ) {
    // Must turn off allow_progress to enable assert and break recursion
    C->root()->verify();
    { // Check if any progress was missed using IterGVN
      // Def-Use info enables transformations not attempted in wash-pass
      // e.g. Region/Phi cleanup, ...
      // Null-check elision -- may not have reached fixpoint
      //                       do not propagate to dominated nodes
      ResourceMark rm;
      PhaseIterGVN igvn2(this,"Verify"); // Fresh and clean!
      // Fill worklist completely
      igvn2.init_worklist(C->root());

      igvn2.set_allow_progress(false);
      igvn2.optimize();
      igvn2.set_allow_progress(true);
    }
  }
  if ( VerifyIterativeGVN && PrintOpto ) {
    if ( _verify_counter == _verify_full_passes )
      tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
                    (int) _verify_full_passes);
    else
      tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
                    (int) _verify_counter, (int) _verify_full_passes);
  }
#endif
}
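// To summarize the driver above: optimize() drains the worklist, running each
// still-live node through transform_old() and deleting nodes whose use count
// has dropped to zero.  The loop_count / rep_cnt checks are purely defensive
// bail-outs against a transformation that never reaches a fixed point, and
// the final VerifyOpto block re-runs a fresh IterGVN over the whole graph,
// with allow_progress turned off, to detect transformations the main pass
// should have found.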


//------------------register_new_node_with_optimizer---------------------------
// Register a new node with the optimizer.  Update the types array, the def-use
// info.  Put on worklist.
Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
  set_type_bottom(n);
  _worklist.push(n);
  if (orig != NULL)  C->copy_node_notes_to(n, orig);
  return n;
}
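// Illustrative usage from an Ideal()-style rewrite (a hedged sketch; AddINode
// and the surrounding names are examples, not taken from this file):
//
//   Node* sum = new (C) AddINode(a, b);
//   igvn->register_new_node_with_optimizer(sum);  // type slot + worklist push
//
// set_type_bottom() seeds the new node with an initial, conservative type;
// its precise type is computed when the node is later pulled off the
// worklist and transformed.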

//------------------------------transform--------------------------------------
// Non-recursive: idealize Node 'n' with respect to its inputs and its value
Node *PhaseIterGVN::transform( Node *n ) {
  if (_delay_transform) {
    // Register the node but don't optimize for now
    register_new_node_with_optimizer(n);
    return n;
  }

  // If brand new node, make space in type array, and give it a type.
  ensure_type_or_null(n);
  if (type_or_null(n) == NULL) {
    set_type_bottom(n);
  }

  return transform_old(n);
}

//------------------------------transform_old----------------------------------
Node *PhaseIterGVN::transform_old( Node *n ) {
#ifndef PRODUCT
  debug_only(uint loop_count = 0;);
  set_transforms();
#endif
  // Remove 'n' from hash table in case it gets modified
  _table.hash_delete(n);
  if( VerifyIterativeGVN ) {
   assert( !_table.find_index(n->_idx), "found duplicate entry in table");
  }

  // Apply the Ideal call in a loop until it no longer applies
  Node *k = n;
  DEBUG_ONLY(dead_loop_check(k);)
  DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
  Node *i = k->Ideal(this, /*can_reshape=*/true);
  assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
  if( VerifyIterativeGVN )
    verify_step(k);
  if( i && VerifyOpto ) {
    if( !allow_progress() ) {
      if (i->is_Add() && i->outcnt() == 1) {
        // Switched input to left side because this is the only use
      } else if( i->is_If() && (i->in(0) == NULL) ) {
        // This IF is dead because it is dominated by an equivalent IF.  When
        // the dominating IF changed, the info was not propagated sparsely to
        // 'this'.  Propagating this info further would spuriously identify
        // other progress.
        return i;
      } else
        set_progress();
    } else
      set_progress();
  }
#endif

  while( i ) {
#ifndef PRODUCT
    debug_only( if( loop_count >= K ) i->dump(4); )
    assert(loop_count < K, "infinite loop in PhaseIterGVN::transform");
    debug_only( loop_count++; )
#endif
    assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
    // Made a change; put users of original Node on worklist
    add_users_to_worklist( k );
    // Replacing root of transform tree?
    if( k != i ) {
      // Make users of old Node now use new.
      subsume_node( k, i );
      k = i;
    }
    DEBUG_ONLY(dead_loop_check(k);)
    // Try idealizing again
    DEBUG_ONLY(is_new = (k->outcnt() == 0);)
    i = k->Ideal(this, /*can_reshape=*/true);
    assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
    if( VerifyIterativeGVN )
      verify_step(k);
    if( i && VerifyOpto ) set_progress();
#endif
  }

  // If brand new node, make space in type array.
  ensure_type_or_null(k);

  // See what kind of values 'k' takes on at runtime
  const Type *t = k->Value(this);
  assert(t != NULL, "value sanity");

  // Since I just called 'Value' to compute the set of run-time values
  // for this Node, and 'Value' is non-local (and therefore expensive), I'll
  // cache the result.  Later requests for the local phase->type of this Node
  // can use the cached Value instead of suffering with 'bottom_type'.
  if (t != type_or_null(k)) {
    NOT_PRODUCT( set_progress(); )
    NOT_PRODUCT( inc_new_values();)
    set_type(k, t);
    // If k is a TypeNode, capture any more-precise type permanently into Node
    k->raise_bottom_type(t);
    // Move users of node to worklist
    add_users_to_worklist( k );
  }

  // If 'k' computes a constant, replace it with a constant
  if( t->singleton() && !k->is_Con() ) {
    NOT_PRODUCT( set_progress(); )
    Node *con = makecon(t);     // Make a constant
    add_users_to_worklist( k );
    subsume_node( k, con );     // Everybody using k now uses con
    return con;
  }

  // Now check for Identities
  i = k->Identity(this);        // Look for a nearby replacement
  if( i != k ) {                // Found? Return replacement!
    NOT_PRODUCT( set_progress(); )
    add_users_to_worklist( k );
    subsume_node( k, i );       // Everybody using k now uses i
    return i;
  }

  // Global Value Numbering
  i = hash_find_insert(k);      // Check for pre-existing node
  if( i && (i != k) ) {
    // Return the pre-existing node if it isn't dead
    NOT_PRODUCT( set_progress(); )
    add_users_to_worklist( k );
    subsume_node( k, i );       // Everybody using k now uses i
    return i;
  }

  // Return Idealized original
  return k;
}
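// Per-node pipeline recap: (1) apply Ideal() in a loop until it stops
// reshaping the node, subsuming the original whenever a replacement comes
// back; (2) recompute Value(), cache it in the type array and capture any
// more-precise type into the node via raise_bottom_type(); (3) if Value()
// yields a singleton, fold the node to a constant; (4) try Identity() for an
// equivalent nearby node; (5) finally value-number the survivor through the
// hash table.  Every step that changes anything pushes the affected users
// back onto the worklist.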

//---------------------------------saturate------------------------------------
const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
                                   const Type* limit_type) const {
  return new_type->narrow(old_type);
}
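// Note: for iterative GVN the freshly computed type is narrowed toward the
// previous type (limit_type is unused here); presumably this keeps a node's
// cached type from drifting on every revisit so the pass converges.  The
// exact rule lives in Type::narrow().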

//------------------------------remove_globally_dead_node----------------------
// Kill a globally dead Node.  All uses are also globally dead and are
// aggressively trimmed.
void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
  enum DeleteProgress {
    PROCESS_INPUTS,
    PROCESS_OUTPUTS
  };
  assert(_stack.is_empty(), "not empty");
  _stack.push(dead, PROCESS_INPUTS);

  while (_stack.is_nonempty()) {
    dead = _stack.node();
    if (dead->Opcode() == Op_SafePoint) {
      dead->as_SafePoint()->disconnect_from_root(this);
    }
    uint progress_state = _stack.index();
    assert(dead != C->root(), "killing root, eh?");
    assert(!dead->is_top(), "add check for top when pushing");
    NOT_PRODUCT( set_progress(); )
    if (progress_state == PROCESS_INPUTS) {
      // After following inputs, continue to outputs
      _stack.set_index(PROCESS_OUTPUTS);
      if (!dead->is_Con()) { // Don't kill constants themselves, only their uses
        bool recurse = false;
        // Remove from hash table
        _table.hash_delete( dead );
        // Smash all inputs to 'dead', isolating him completely
        for (uint i = 0; i < dead->req(); i++) {
          Node *in = dead->in(i);
          if (in != NULL && in != C->top()) {  // Points to something?
            int nrep = dead->replace_edge(in, NULL);  // Kill edges
            assert((nrep > 0), "sanity");
            if (in->outcnt() == 0) { // Made input go dead?
              _stack.push(in, PROCESS_INPUTS); // Recursively remove
              recurse = true;
            } else if (in->outcnt() == 1 &&
                       in->has_special_unique_user()) {
              _worklist.push(in->unique_out());
            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
              if (in->Opcode() == Op_Region) {
                _worklist.push(in);
              } else if (in->is_Store()) {
                DUIterator_Fast imax, i = in->fast_outs(imax);
                _worklist.push(in->fast_out(i));
                i++;
                if (in->outcnt() == 2) {
                  _worklist.push(in->fast_out(i));
                  i++;
                }
                assert(!(i < imax), "sanity");
              }
            }
            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
                in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
              // A Load that directly follows an InitializeNode is
              // going away. The Stores that follow are candidates
              // again to be captured by the InitializeNode.
              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
                Node *n = in->fast_out(j);
                if (n->is_Store()) {
                  _worklist.push(n);
                }
              }
            }
          } // if (in != NULL && in != C->top())
        } // for (uint i = 0; i < dead->req(); i++)
        if (recurse) {
          continue;
        }
      } // if (!dead->is_Con())
    } // if (progress_state == PROCESS_INPUTS)

    // Aggressively kill globally dead uses
    // (Rather than pushing all the outs at once, we push one at a time,
    // plus the parent to resume later, because of the indefinite number
    // of edge deletions per loop trip.)
    if (dead->outcnt() > 0) {
      // Recursively remove output edges
      _stack.push(dead->raw_out(0), PROCESS_INPUTS);
    } else {
      // Finished disconnecting all input and output edges.
      _stack.pop();
      // Remove dead node from iterative worklist
      _worklist.remove(dead);
      // A constant node that has no out-edges and only one in-edge from
      // root is usually dead. However, the reshaping walk sometimes makes
      // it reachable again by adding use edges. So we will NOT count Con
      // nodes as dead, to be conservative about the dead node count at any
      // given time.
      if (!dead->is_Con()) {
        C->record_dead_node(dead->_idx);
      }
      if (dead->is_macro()) {
        C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        C->remove_expensive_node(dead);
      }
      CastIINode* cast = dead->isa_CastII();
      if (cast != NULL && cast->has_range_check()) {
        C->remove_range_check_cast(cast);
      }
    }
  } // while (_stack.is_nonempty())
}
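// The teardown above is deliberately iterative: _stack holds (node, phase)
// pairs, where PROCESS_INPUTS severs the dead node's inputs (pushing any
// input that just went dead itself) and PROCESS_OUTPUTS then peels off the
// remaining dead uses one at a time.  Using an explicit stack instead of
// recursion keeps long chains of dead nodes from overflowing the C++ stack,
// at the price of the "recurse/continue" bookkeeping in the input loop.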

//------------------------------subsume_node-----------------------------------
// Remove users from node 'old' and add them to node 'nn'.
void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
  if (old->Opcode() == Op_SafePoint) {
    old->as_SafePoint()->disconnect_from_root(this);
  }
  assert( old != hash_find(old), "should already have been removed" );
  assert( old != C->top(), "cannot subsume top node");
  // Copy debug or profile information to the new version:
  C->copy_node_notes_to(nn, old);
  // Move users of node 'old' to node 'nn'
  for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
    Node* use = old->last_out(i);  // for each use...
    // use might need re-hashing (but it won't if it's a new node)
    bool is_in_table = _table.hash_delete( use );
    // Update use-def info as well
    // We remove all occurrences of old within use->in,
    // so as to avoid rehashing any node more than once.
    // The hash table probe swamps any outer loop overhead.
    uint num_edges = 0;
    for (uint jmax = use->len(), j = 0; j < jmax; j++) {
      if (use->in(j) == old) {
        use->set_req(j, nn);
        ++num_edges;
      }
    }
    // Insert into GVN hash table if unique
    // If a duplicate, 'use' will be cleaned up when pulled off worklist
    if( is_in_table ) {
      hash_find_insert(use);
    }
    i -= num_edges;    // we deleted 1 or more copies of this edge
  }

  // Search for instance field data PhiNodes in the same region pointing to the old
  // memory PhiNode and update their instance memory ids to point to the new node.
  if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != NULL) {
    Node* region = old->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      PhiNode* phi = region->fast_out(i)->isa_Phi();
      if (phi != NULL && phi->inst_mem_id() == (int)old->_idx) {
        phi->set_inst_mem_id((int)nn->_idx);
      }
    }
  }

  // Smash all inputs to 'old', isolating him completely
  Node *temp = new (C) Node(1);
  temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
  remove_dead_node( old );
  temp->del_req(0);         // Yank bogus edge
#ifndef PRODUCT
  if( VerifyIterativeGVN ) {
    for ( int i = 0; i < _verify_window_size; i++ ) {
      if ( _verify_window[i] == old )
        _verify_window[i] = nn;
    }
  }
#endif
  _worklist.remove(temp);   // this can be necessary
  temp->destruct();         // reuse the _idx of this little guy
}
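// The throwaway single-input 'temp' node is just a temporary use that keeps
// 'nn' alive while remove_dead_node() tears down 'old' and anything only
// 'old' was keeping alive; once that is done, the bogus edge is yanked and
// temp's _idx is recycled by destruct().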

//------------------------------add_users_to_worklist--------------------------
void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    _worklist.push(n->fast_out(i));  // Push on worklist
  }
}

// Return the counted-loop Phi if, as a counted-loop exit condition, cmp
// compares the induction variable with n
static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) {
  for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) {
    Node* bol = cmp->fast_out(i);
    for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) {
      Node* iff = bol->fast_out(i2);
      if (iff->is_CountedLoopEnd()) {
        CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
        if (cle->limit() == n) {
          PhiNode* phi = cle->phi();
          if (phi != NULL) {
            return phi;
          }
        }
      }
    }
  }
  return NULL;
}
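// Shape matched by the loop above (a sketch):
//
//   n --> CmpI --> Bool --> CountedLoopEnd   (with cle->limit() == n)
//                                 |
//                                 +--> cle->phi()   <-- returned
//
// i.e. when 'n' is the limit of a counted loop's exit test, the loop's
// induction-variable Phi is handed back so callers can re-push it.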

void PhaseIterGVN::add_users_to_worklist( Node *n ) {
  add_users_to_worklist0(n);

  // Move users of node to worklist
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i); // Get use

    if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
        use->is_Store() )       // Enable store/load same address
      add_users_to_worklist0(use);

    // If we changed the type of the receiver of a call, we need to revisit
    // the Catch following the call.  It's looking for a non-NULL
    // receiver to know when to enable the regular fall-through path
    // in addition to the NullPtrException path.
    if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
      Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
      if (p != NULL) {
        add_users_to_worklist0(p);
      }
    }

    uint use_op = use->Opcode();
    if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
      add_users_to_worklist(use); // Put Bool on worklist
      if (use->outcnt() > 0) {
        Node* bol = use->raw_out(0);
        if (bol->outcnt() > 0) {
          Node* iff = bol->raw_out(0);
          if (iff->outcnt() == 2) {
            // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
            // phi merging either 0 or 1 onto the worklist
            Node* ifproj0 = iff->raw_out(0);
            Node* ifproj1 = iff->raw_out(1);
            if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
              Node* region0 = ifproj0->raw_out(0);
              Node* region1 = ifproj1->raw_out(0);
              if( region0 == region1 )
                add_users_to_worklist0(region0);
            }
          }
        }
      }
      if (use_op == Op_CmpI) {
        Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n);
        if (phi != NULL) {
          // If an opaque node feeds into the limit condition of a
          // CountedLoop, we need to process the Phi node for the
          // induction variable when the opaque node is removed:
          // the range of values taken by the Phi is now known and
          // so its type is also known.
          _worklist.push(phi);
        }
        Node* in1 = use->in(1);
        for (uint i = 0; i < in1->outcnt(); i++) {
          if (in1->raw_out(i)->Opcode() == Op_CastII) {
            Node* castii = in1->raw_out(i);
            if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
              Node* ifnode = castii->in(0)->in(0);
              if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
                // Reprocess a CastII node that may depend on an
                // opaque node value when the opaque node is
                // removed. In case it carries a dependency we can do
                // a better job of computing its type.
                _worklist.push(castii);
              }
            }
          }
        }
      }
    }

    // If changed Cast input, check Phi users for simple cycles
    if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Phi())
          _worklist.push(u);
      }
    }
    // If changed LShift inputs, check RShift users for useless sign-ext
    if( use_op == Op_LShiftI ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->Opcode() == Op_RShiftI)
          _worklist.push(u);
      }
    }
    // If changed AddI/SubI inputs, check CmpU for range check optimization.
    if (use_op == Op_AddI || use_op == Op_SubI) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) {
          _worklist.push(u);
        }
      }
    }
    // If changed AddP inputs, check Stores for loop invariant
    if( use_op == Op_AddP ) {
      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
        Node* u = use->fast_out(i2);
        if (u->is_Mem())
          _worklist.push(u);
      }
    }
    // If changed initialization activity, check dependent Stores
    if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
      InitializeNode* init = use->as_Allocate()->initialization();
      if (init != NULL) {
        Node* imem = init->proj_out(TypeFunc::Memory);
        if (imem != NULL)  add_users_to_worklist0(imem);
      }
    }
    if (use_op == Op_Initialize) {
      Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
      if (imem != NULL)  add_users_to_worklist0(imem);
    }
  }
}
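// Beyond the plain "push every direct user" of add_users_to_worklist0(), the
// cases above re-queue second-order users whose Ideal()/Value() results
// depend on 'n' only indirectly: projections of Multi nodes and stores, the
// Catch behind a changed call receiver, Bool/If/x2logic regions behind a Cmp,
// counted-loop Phis and range-check CastIIs behind a CmpI, Phis behind casts,
// RShiftI users of an LShiftI, CmpU users of AddI/SubI, memory users of an
// AddP, and the memory projections of Allocate/Initialize.  Since IterGVN
// only revisits what is on the worklist, missing one of these cases leaves
// the graph correct but under-optimized.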

/**
 * Remove the speculative part of all types that we know of
 */
void PhaseIterGVN::remove_speculative_types()  {
  assert(UseTypeSpeculation, "speculation is off");
  for (uint i = 0; i < _types.Size(); i++)  {
    const Type* t = _types.fast_lookup(i);
    if (t != NULL) {
      _types.map(i, t->remove_speculative());
    }
  }
  _table.check_no_speculative_types();
}

//=============================================================================
#ifndef PRODUCT
uint PhaseCCP::_total_invokes   = 0;
uint PhaseCCP::_total_constants = 0;
#endif
//------------------------------PhaseCCP---------------------------------------
// Conditional Constant Propagation, ala Wegman & Zadeck
PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
  NOT_PRODUCT( clear_constants(); )
  assert( _worklist.size() == 0, "" );
  // Clear out _nodes from IterGVN.  Must be clear to transform call.
  _nodes.clear();               // Clear out from IterGVN
  analyze();
}

#ifndef PRODUCT
//------------------------------~PhaseCCP--------------------------------------
PhaseCCP::~PhaseCCP() {
  inc_invokes();
  _total_constants += count_constants();
}
#endif


#ifdef ASSERT
static bool ccp_type_widens(const Type* t, const Type* t0) {
  assert(t->meet(t0) == t, "Not monotonic");
  switch (t->base() == t0->base() ? t->base() : Type::Top) {
  case Type::Int:
    assert(t0->isa_int()->_widen <= t->isa_int()->_widen, "widen increases");
    break;
  case Type::Long:
    assert(t0->isa_long()->_widen <= t->isa_long()->_widen, "widen increases");
    break;
  }
  return true;
}
#endif //ASSERT

//------------------------------analyze----------------------------------------
void PhaseCCP::analyze() {
  // Initialize all types to TOP, optimistic analysis
  for (int i = C->unique() - 1; i >= 0; i--)  {
    _types.map(i,Type::TOP);
  }

  // Push root onto worklist
  Unique_Node_List worklist;
  worklist.push(C->root());

  // Pull from worklist; compute new value; push changes out.
  // This loop is the meat of CCP.
  while( worklist.size() ) {
    Node *n = worklist.pop();
    const Type *t = n->Value(this);
    if (t != type(n)) {
      assert(ccp_type_widens(t, type(n)), "ccp type must widen");
#ifndef PRODUCT
      if( TracePhaseCCP ) {
        t->dump();
        do { tty->print("\t"); } while (tty->position() < 16);
        n->dump();
      }
#endif
      set_type(n, t);
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* m = n->fast_out(i);   // Get user
        if (m->is_Region()) {  // New path to Region?  Must recheck Phis too
          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
            Node* p = m->fast_out(i2); // Propagate changes to uses
            if (p->bottom_type() != type(p)) { // If not already bottomed out
              worklist.push(p); // Propagate change to user
            }
          }
        }
        // If we changed the type of the receiver of a call, we need to revisit
        // the Catch following the call.  It's looking for a non-NULL
        // receiver to know when to enable the regular fall-through path
        // in addition to the NullPtrException path
        if (m->is_Call()) {
          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
            Node* p = m->fast_out(i2);  // Propagate changes to uses
            if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control) {
              Node* catch_node = p->find_out_with(Op_Catch);
              if (catch_node != NULL) {
                worklist.push(catch_node);
              }
            }
          }
        }
        if (m->bottom_type() != type(m)) { // If not already bottomed out
          worklist.push(m);     // Propagate change to user
        }

        // CmpU nodes can get their type information from two nodes up in the
        // graph (instead of from the nodes immediately above). Make sure they
        // are added to the worklist if nodes they depend on are updated, since
        // they could be missed and get wrong types otherwise.
        uint m_op = m->Opcode();
        if (m_op == Op_AddI || m_op == Op_SubI) {
          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
            Node* p = m->fast_out(i2); // Propagate changes to uses
            if (p->Opcode() == Op_CmpU) {
              // Got a CmpU which might need the new type information from node n.
              if(p->bottom_type() != type(p)) { // If not already bottomed out
                worklist.push(p); // Propagate change to user
              }
            }
          }
        }
        // If n is used in a counted loop exit condition then the type
        // of the counted loop's Phi depends on the type of n. See
        // PhiNode::Value().
        if (m_op == Op_CmpI) {
          PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n);
          if (phi != NULL) {
            worklist.push(phi);
          }
        }
      }
    }
  }
}
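// analyze() is the optimistic half of Wegman/Zadeck CCP: every type starts at
// TOP and, as the ccp_type_widens()/monotonicity assert checks, can only move
// down the lattice as Value() is recomputed, so the worklist loop must reach
// a fixed point.  The extra pushes for Region/Phi, Call/Catch, AddI/SubI
// feeding CmpU, and CmpI feeding a counted-loop Phi echo the corresponding
// special cases in PhaseIterGVN::add_users_to_worklist().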

//------------------------------do_transform-----------------------------------
// Top level driver for the recursive transformer
void PhaseCCP::do_transform() {
  // Correct leaves of new-space Nodes; they point to old-space.
  C->set_root( transform(C->root())->as_Root() );
  assert( C->top(),  "missing TOP node" );
  assert( C->root(), "missing root" );
}

//------------------------------transform--------------------------------------
// Given a Node in old-space, clone him into new-space.
// Convert any of his old-space children into new-space children.
Node *PhaseCCP::transform( Node *n ) {
  Node *new_node = _nodes[n->_idx]; // Check for transformed node
  if( new_node != NULL )
    return new_node;                // Been there, done that, return old answer
  new_node = transform_once(n);     // Check for constant
  _nodes.map( n->_idx, new_node );  // Flag as having been cloned

  // Allocate a stack of about live_nodes()/2 entries to avoid frequent realloc
  GrowableArray <Node *> trstack(C->live_nodes() >> 1);

  trstack.push(new_node);           // Process children of cloned node
  while ( trstack.is_nonempty() ) {
    Node *clone = trstack.pop();
    uint cnt = clone->req();
    for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
      Node *input = clone->in(i);
      if( input != NULL ) {                    // Ignore NULLs
        Node *new_input = _nodes[input->_idx]; // Check for cloned input node
        if( new_input == NULL ) {
          new_input = transform_once(input);   // Check for constant
          _nodes.map( input->_idx, new_input );// Flag as having been cloned
          trstack.push(new_input);
        }
        assert( new_input == clone->in(i), "insanity check");
      }
    }
  }
  return new_node;
}
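// As with remove_globally_dead_node(), the walk uses an explicit stack
// (trstack) rather than recursion: each node goes through transform_once()
// exactly once (constant-folding where the CCP type is a singleton), the
// result is memoized in _nodes, and any not-yet-visited inputs are pushed
// for later processing.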


//------------------------------transform_once---------------------------------
// For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
Node *PhaseCCP::transform_once( Node *n ) {
  const Type *t = type(n);
  // Constant?  Use constant Node instead
  if( t->singleton() ) {
    Node *nn = n;               // Default is to return the original constant
    if( t == Type::TOP ) {
      // cache my top node on the Compile instance
      if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
        C->set_cached_top_node( ConNode::make(C, Type::TOP) );
        set_type(C->top(), Type::TOP);
      }
      nn = C->top();
    }
    if( !n->is_Con() ) {
      if( t != Type::TOP ) {
        nn = makecon(t);        // ConNode::make(t);
        NOT_PRODUCT( inc_constants(); )
      } else if( n->is_Region() ) { // Unreachable region
        // Note: nn == C->top()
        n->set_req(0, NULL);        // Cut selfreference
        // Eagerly remove dead phis to avoid creating copies of them.
        for (DUIterator i = n->outs(); n->has_out(i); i++) {
          Node* m = n->out(i);
          if( m->is_Phi() ) {
            assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
            replace_node(m, nn);
            --i; // deleted this phi; rescan starting with next position
          }
        }
      }
      replace_node(n,nn);       // Update DefUse edges for new constant
    }
    return nn;
  }
