

    // These two phis are pre-filled with copies of the fast IO and Memory
    PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
    PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);

    result_rgn->init_req(slow_result_path, control());
    result_io ->init_req(slow_result_path, i_o());
    result_mem->init_req(slow_result_path, reset_memory());
    result_val->init_req(slow_result_path, slow_val);

    set_all_memory(_gvn.transform(result_mem));
    set_i_o(       _gvn.transform(result_io));
  }

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(result_rgn, result_val);
  return true;
}

//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
}

//-----------------------load_klass_from_mirror_common-------------------------
// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
// and branch to the given path on the region.
// If never_see_null, take an uncommon trap on null, so we can optimistically
// compile for the non-null case.
// If the region is NULL, force never_see_null = true.
Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
                                                    bool never_see_null,
                                                    RegionNode* region,
                                                    int null_path,
                                                    int offset) {
  if (region == NULL)  never_see_null = true;
  Node* p = basic_plus_adr(mirror, offset);
  const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
  Node* null_ctl = top();
  kls = null_check_oop(kls, &null_ctl, never_see_null);
  if (region != NULL) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
    region->init_req(null_path, null_ctl);
  } else {
    assert(null_ctl == top(), "no loose ends");
  }
  return kls;
}

//--------------------(inline_native_Class_query helpers)---------------------
// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
// Fall through if (mods & mask) == bits, take the guard otherwise.
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
  // Branch around if the given klass has the given modifier bit set.
  // Like generate_guard, adds a new path onto the region.
  Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
  Node* mask = intcon(modifier_mask);
  Node* bits = intcon(modifier_bits);
  Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
  Node* cmp  = _gvn.transform(new (C) CmpINode(mbit, bits));
  Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
  return generate_fair_guard(bol, region);
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
}

//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
  const Type* return_type = TypeInt::BOOL;
  Node* prim_return_value = top();  // what happens if it's a primitive class?
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  bool expect_prim = false;     // most of these guys expect to work on refs

  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };

  Node* mirror = argument(0);
  Node* obj    = top();

  switch (id) {
  case vmIntrinsics::_isInstance:
    // nothing is an instance of a primitive type
    prim_return_value = intcon(0);
    obj = argument(1);
    break;
  case vmIntrinsics::_getModifiers:
    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
    assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
    return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
    break;
  case vmIntrinsics::_isInterface:
    prim_return_value = intcon(0);
    break;
  case vmIntrinsics::_isArray:
    prim_return_value = intcon(0);
    expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
    break;
  case vmIntrinsics::_isPrimitive:
    prim_return_value = intcon(1);
    expect_prim = true;  // obviously
    break;
  case vmIntrinsics::_getSuperclass:
    prim_return_value = null();
    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
    break;
  case vmIntrinsics::_getComponentType:
    prim_return_value = null();
    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
    break;
  case vmIntrinsics::_getClassAccessFlags:
    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
    return_type = TypeInt::INT;  // not bool!  6297094
    break;
  default:
    fatal_unexpected_iid(id);
    break;
  }
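  // For example, on a primitive mirror such as int.class, getModifiers()
  // answers abstract|final|public (0x0411), as Class.getModifiers specifies
  // for primitive and void classes.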

  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
  if (mirror_con == NULL)  return false;  // cannot happen?

#ifndef PRODUCT
  if (C->print_intrinsics() || C->print_inlining()) {
    ciType* k = mirror_con->java_mirror_type();
    if (k) {
      tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
      k->print_name();
      tty->cr();
    }
  }
#endif

  // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  record_for_igvn(region);
  PhiNode* phi = new (C) PhiNode(region, return_type);

  // The mirror will never be null for Reflection.getClassAccessFlags(),
  // but it may be null for Class.isInstance or Class.getModifiers. Throw
  // an NPE if it is. See bug 4774291.

  // For Reflection.getClassAccessFlags(), the null check occurs in
  // the wrong place; see inline_unsafe_access(), above, for a similar
  // situation.
  mirror = null_check(mirror);
  // If mirror or obj is dead, only null-path is taken.
  if (stopped())  return true;

  if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)

  // Now load the mirror's klass metaobject, and null-check it.
  // Side-effects region with the control path if the klass is null.
  Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
  // If kls is null, we have a primitive mirror.
  phi->init_req(_prim_path, prim_return_value);
  if (stopped()) { set_result(region, phi); return true; }
  bool safe_for_replace = (region->in(_prim_path) == top());

  Node* p;  // handy temp
  Node* null_ctl;

  // Now that we have the non-null klass, we can perform the real query.
  // For constant classes, the query will constant-fold in LoadNode::Value.
  Node* query_value = top();
  switch (id) {
  case vmIntrinsics::_isInstance:
    // nothing is an instance of a primitive type
    query_value = gen_instanceof(obj, kls, safe_for_replace);
    break;

  case vmIntrinsics::_getModifiers:
    p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
    break;

  case vmIntrinsics::_isInterface:
    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
    if (generate_interface_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an interface.
      phi->add_req(intcon(1));
    // If we fall through, it's a plain class.
    query_value = intcon(0);
    break;

  case vmIntrinsics::_isArray:
    // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
    if (generate_array_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an array.
      phi->add_req(intcon(1));
    // If we fall through, it's a plain class.
    query_value = intcon(0);
    break;

  case vmIntrinsics::_isPrimitive:
    query_value = intcon(0); // "normal" path produces false
    break;

  case vmIntrinsics::_getSuperclass:
    // The rules here are somewhat unfortunate, but we can still do better
    // with random logic than with a JNI call.
    // Interfaces store null or Object as _super, but must report null.
    // Arrays store an intermediate super as _super, but must report Object.
    // Other types can report the actual _super.
    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
    if (generate_interface_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an interface.
      phi->add_req(null());
    if (generate_array_guard(kls, region) != NULL)
      // A guard was added.  If the guard is taken, it was an array.
      phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
    // If we fall through, it's a plain class.  Get its _super.
    p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
    null_ctl = top();
    kls = null_check_oop(kls, &null_ctl);
    if (null_ctl != top()) {
      // If the guard is taken, Object.superClass is null (both klass and mirror).
      region->add_req(null_ctl);
      phi   ->add_req(null());
    }
    if (!stopped()) {
      query_value = load_mirror_from_klass(kls);
    }
    break;

  case vmIntrinsics::_getComponentType:
    if (generate_array_guard(kls, region) != NULL) {
      // Be sure to pin the oop load to the guard edge just created:
      Node* is_array_ctrl = region->in(region->req()-1);
      Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
      phi->add_req(cmo);
    }
    query_value = null();  // non-array case is null
    break;

  case vmIntrinsics::_getClassAccessFlags:
    p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
    break;

  default:
    fatal_unexpected_iid(id);
    break;
  }

  // Fall-through is the normal case of a query to a real class.
  phi->init_req(1, query_value);
  region->init_req(1, control());

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(region, phi);
  return true;
}

//--------------------------inline_native_subtype_check------------------------
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
bool LibraryCallKit::inline_native_subtype_check() {
  // Pull both arguments off the stack.
  Node* args[2];                // two java.lang.Class mirrors: superc, subc
  args[0] = argument(0);
  args[1] = argument(1);
  Node* klasses[2];             // corresponding Klasses: superk, subk
  klasses[0] = klasses[1] = top();

  enum {
    // A full decision tree on {superc is prim, subc is prim}:
    _prim_0_path = 1,           // {P,N} => false
                                // {P,P} & superc!=subc => false
    _prim_same_path,            // {P,P} & superc==subc => true
    _prim_1_path,               // {N,P} => false
    _ref_subtype_path,          // {N,N} & subtype check wins => true
    _both_ref_path,             // {N,N} & subtype check loses => false
    PATH_LIMIT
  };
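  // Illustration of the paths, in Java terms:
  //   int.class.isAssignableFrom(int.class)         -> _prim_same_path   (true)
  //   int.class.isAssignableFrom(Object.class)      -> _prim_0_path      (false)
  //   Object.class.isAssignableFrom(int.class)      -> _prim_1_path      (false)
  //   Number.class.isAssignableFrom(Integer.class)  -> _ref_subtype_path (true)
  //   String.class.isAssignableFrom(Number.class)   -> _both_ref_path    (false)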

  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
  Node*       phi    = new (C) PhiNode(region, TypeInt::BOOL);
  record_for_igvn(region);

  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
  int class_klass_offset = java_lang_Class::klass_offset_in_bytes();

  // First null-check both mirrors and load each mirror's klass metaobject.
  int which_arg;
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* arg = args[which_arg];
    arg = null_check(arg);
    if (stopped())  break;
    args[which_arg] = arg;

    Node* p = basic_plus_adr(arg, class_klass_offset);
    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
    klasses[which_arg] = _gvn.transform(kls);
  }

  // Having loaded both klasses, test each for null.
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  for (which_arg = 0; which_arg <= 1; which_arg++) {
    Node* kls = klasses[which_arg];
    Node* null_ctl = top();
    kls = null_check_oop(kls, &null_ctl, never_see_null);
    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
    region->init_req(prim_path, null_ctl);
    if (stopped())  break;
    klasses[which_arg] = kls;
  }

  if (!stopped()) {
    // now we have two reference types, in klasses[0..1]
    Node* subk   = klasses[1];  // the argument to isAssignableFrom
    Node* superk = klasses[0];  // the receiver
    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
    // now we have a successful reference subtype check
    region->set_req(_ref_subtype_path, control());
  }

  // If both operands are primitive (both klasses null), then
  // we must return true when they are identical primitives.
  // It is convenient to test this after the first null klass check.
  set_control(region->in(_prim_0_path)); // go back to first null check
  if (!stopped()) {
    // Since superc is primitive, make a guard for the superc==subc case.
    Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
    Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
    generate_guard(bol_eq, region, PROB_FAIR);
    if (region->req() == PATH_LIMIT+1) {
      // A guard was added.  If the added guard is taken, superc==subc.
      region->swap_edges(PATH_LIMIT, _prim_same_path);
      region->del_req(PATH_LIMIT);
    }
    region->set_req(_prim_0_path, control()); // Not equal after all.
  }

  // these are the only paths that produce 'true':
  phi->set_req(_prim_same_path,   intcon(1));
  phi->set_req(_ref_subtype_path, intcon(1));

  // pull together the cases:
  assert(region->req() == PATH_LIMIT, "sane region");
  for (uint i = 1; i < region->req(); i++) {
    Node* ctl = region->in(i);
    if (ctl == NULL || ctl == top()) {
      region->set_req(i, top());
      phi   ->set_req(i, top());
    } else if (phi->in(i) == NULL) {
      phi->set_req(i, intcon(0)); // all other paths produce 'false'
    }
  }

  set_control(_gvn.transform(region));
  set_result(_gvn.transform(phi));
  return true;
}

//---------------------generate_array_guard_common------------------------
Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
                                                  bool obj_array, bool not_array) {
  // If obj_array/not_array==false/false:
  // Branch around if the given klass is in fact an array (either obj or prim).
  // If obj_array/not_array==false/true:
  // Branch around if the given klass is not an array klass of any kind.
  // If obj_array/not_array==true/true:
  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
  // If obj_array/not_array==true/false:
  // Branch around if the kls is an oop array (Object[] or subtype)
  //
  // Like generate_guard, adds a new path onto the region.
  jint  layout_con = 0;
  Node* layout_val = get_layout_helper(kls, layout_con);
  if (layout_val == NULL) {
    bool query = (obj_array
                  ? Klass::layout_helper_is_objArray(layout_con)
                  : Klass::layout_helper_is_array(layout_con));
    if (query == not_array) {
      return NULL;                       // never a branch
    } else {                             // always a branch
      Node* always_branch = control();
      if (region != NULL)
        region->add_req(always_branch);
      set_control(top());
      return always_branch;
    }
  }
  // Now test the correct condition.
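  // Klass::_layout_helper is positive for instances (the size in bytes) and
  // negative for arrays, with the array tag in the top two bits.  As signed
  // ints, oop-array values sort below the type-array tag, so "lt" against
  // (type_tag << shift) matches exactly the oop arrays, while "lt" against
  // _lh_neutral_value (zero) matches arrays of any kind.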
  jint  nval = (obj_array
                ? (jint)(Klass::_lh_array_tag_type_value
                   <<    Klass::_lh_array_tag_shift)
                : Klass::_lh_neutral_value);
  Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
  // invert the test if we are looking for a non-array
  if (not_array)  btest = BoolTest(btest).negate();
  Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest));
  return generate_fair_guard(bol, region);
}


//-----------------------inline_native_newArray--------------------------
// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
bool LibraryCallKit::inline_native_newArray() {
  Node* mirror    = argument(0);
  Node* count_val = argument(1);

  mirror = null_check(mirror);
  // If mirror is dead, only null-path is taken.
  if (stopped())  return true;

  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new(C) PhiNode(result_reg,
                                          TypeInstPtr::NOTNULL);
  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
                                          TypePtr::BOTTOM);

  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
                                                  result_reg, _slow_path);
  Node* normal_ctl   = control();
  Node* no_array_ctl = result_reg->in(_slow_path);

  // Generate code for the slow case.  We make a call to newArray().
  set_control(no_array_ctl);
  if (!stopped()) {
    // Either the input type is void.class, or else the
    // array klass has not yet been cached.  Either the
    // ensuing call will throw an exception, or else it
    // will cache the array klass for next time.
    PreserveJVMState pjvms(this);
    CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
    Node* slow_result = set_results_for_java_call(slow_call);
    // this->control() comes from set_results_for_java_call
    result_reg->set_req(_slow_path, control());
    result_val->set_req(_slow_path, slow_result);
    result_io ->set_req(_slow_path, i_o());
    result_mem->set_req(_slow_path, reset_memory());
  }

  set_control(normal_ctl);
  if (!stopped()) {
    // Normal case:  The array type has been cached in the java.lang.Class.
    // The following call works fine even if the array type is polymorphic.
    // It could be a dynamic mix of int[], boolean[], Object[], etc.
    Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
    result_reg->init_req(_normal_path, control());
    result_val->init_req(_normal_path, obj);
    result_io ->init_req(_normal_path, i_o());
    result_mem->init_req(_normal_path, reset_memory());
  }

  // Return the combined state.
  set_i_o(        _gvn.transform(result_io)  );
  set_all_memory( _gvn.transform(result_mem));

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(result_reg, result_val);
  return true;
}

//----------------------inline_native_getLength--------------------------
// public static native int java.lang.reflect.Array.getLength(Object array);
bool LibraryCallKit::inline_native_getLength() {
  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;

  Node* array = null_check(argument(0));
  // If array is dead, only null-path is taken.
  if (stopped())  return true;

  // Deoptimize if it is a non-array.
  Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);

  if (non_array != NULL) {
    PreserveJVMState pjvms(this);
    set_control(non_array);
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }

  // If control is dead, only non-array-path is taken.
  if (stopped())  return true;

  // This works fine even if the array type is polymorphic.
  // It could be a dynamic mix of int[], boolean[], Object[], etc.
  Node* result = load_array_length(array);

  C->set_has_split_ifs(true);  // Has chance for split-if optimization
  set_result(result);
  return true;
}

//------------------------inline_array_copyOf----------------------------
// public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;

  // Get the arguments.
  Node* original          = argument(0);
  Node* start             = is_copyOfRange? argument(1): intcon(0);
  Node* end               = is_copyOfRange? argument(2): argument(1);
  Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);

  Node* newcopy = NULL;

  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Arrays.copyOf if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    array_type_mirror = null_check(array_type_mirror);
    original          = null_check(original);

    // Check if a null path was taken unconditionally.
    if (stopped())  return true;

    Node* orig_length = load_array_length(original);

    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
    klass_node = null_check(klass_node);

    RegionNode* bailout = new (C) RegionNode(1);
    record_for_igvn(bailout);

    // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
    // Bail out if that is so.
    Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
    if (not_objArray != NULL) {
      // Improve the klass node's type from the new optimistic assumption:
      ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
      const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
      Node* cast = new (C) CastPPNode(klass_node, akls);
      cast->init_req(0, control());
      klass_node = _gvn.transform(cast);
    }

    // Bail out if either start or end is negative.
    generate_negative_guard(start, bailout, &start);
    generate_negative_guard(end,   bailout, &end);

    Node* length = end;
    if (_gvn.type(start) != TypeInt::ZERO) {
      length = _gvn.transform(new (C) SubINode(end, start));
    }

    // Bail out if length is negative.
    // Without this the new_array would throw
    // NegativeArraySizeException, but IllegalArgumentException is what
    // should be thrown.
    generate_negative_guard(length, bailout, &length);

    if (bailout->req() > 1) {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(bailout));
      uncommon_trap(Deoptimization::Reason_intrinsic,
                    Deoptimization::Action_maybe_recompile);
    }

    if (!stopped()) {
      // How many elements will we copy from the original?
      // The answer is MinI(orig_length - start, length).
      Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
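      // E.g., Arrays.copyOfRange(a, 5, 10) on a 7-element array copies
      // min(7 - 5, 5) == 2 elements; the tail of the new array is left
      // default-initialized by new_array.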

      newcopy = new_array(klass_node, length, 0);  // no arguments to push

      // Generate a direct call to the right arraycopy function(s).
      // We know the copy is disjoint but we might not know if the
      // oop stores need checking.
      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
      // This will fail a store-check if x contains any non-nulls.
      bool disjoint_bases = true;
      // if start > orig_length then the length of the copy may be
      // negative.
      bool length_never_negative = !is_copyOfRange;
      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
                         original, start, newcopy, intcon(0), moved,
                         disjoint_bases, length_never_negative);
    }
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  if (!stopped()) {
    set_result(newcopy);
  }
  return true;
}


//----------------------generate_virtual_guard---------------------------
// Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
                                             RegionNode* slow_region) {
  ciMethod* method = callee();
  int vtable_index = method->vtable_index();
  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
         err_msg_res("bad index %d", vtable_index));
  // Get the Method* out of the appropriate vtable entry.
  int entry_offset  = (InstanceKlass::vtable_start_offset() +
                     vtable_index*vtableEntry::size()) * wordSize +
                     vtableEntry::method_offset_in_bytes();
  Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);

  // Compare the target method with the expected method (e.g., Object.hashCode).
  const TypePtr* native_call_addr = TypeMetadataPtr::make(method);

  Node* native_call = makecon(native_call_addr);
  Node* chk_native  = _gvn.transform(new(C) CmpPNode(target_call, native_call));
  Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne));

  return generate_slow_guard(test_native, slow_region);
}

//-----------------------generate_method_call----------------------------
// Use generate_method_call to make a slow-call to the real
// method if the fast path fails.  An alternative would be to
// use a stub like OptoRuntime::slow_arraycopy_Java.
// This only works for expanding the current library call,
// not another intrinsic.  (E.g., don't use this for making an
// arraycopy call inside of the copyOf intrinsic.)
CallJavaNode*
LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
  // When compiling the intrinsic method itself, do not use this technique.
  guarantee(callee() != C->method(), "cannot make slow-call to self");

  ciMethod* method = callee();
  // ensure the JVMS we have will be correct for this call
  guarantee(method_id == method->intrinsic_id(), "must match");

  const TypeFunc* tf = TypeFunc::make(method);
  CallJavaNode* slow_call;
  if (is_static) {
    assert(!is_virtual, "");
    slow_call = new(C) CallStaticJavaNode(C, tf,
                           SharedRuntime::get_resolve_static_call_stub(),
                           method, bci());
  } else if (is_virtual) {
    null_check_receiver();
    int vtable_index = Method::invalid_vtable_index;
    if (UseInlineCaches) {
      // Suppress the vtable call
    } else {
      // hashCode and clone are not miranda methods,
      // so the vtable index is fixed.
      // No need to use the linkResolver to get it.
       vtable_index = method->vtable_index();
       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
              err_msg_res("bad index %d", vtable_index));
    }
    slow_call = new(C) CallDynamicJavaNode(tf,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          method, vtable_index, bci());
  } else {  // neither virtual nor static:  opt_virtual
    null_check_receiver();
    slow_call = new(C) CallStaticJavaNode(C, tf,
                                SharedRuntime::get_resolve_opt_virtual_call_stub(),
                                method, bci());
    slow_call->set_optimized_virtual(true);
  }
  set_arguments_for_java_call(slow_call);
  set_edges_for_java_call(slow_call);
  return slow_call;
}


/**
 * Build special case code for calls to hashCode on an object. This call may
 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
 * slightly different code.
 */
bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
  assert(is_static == callee()->is_static(), "correct intrinsic selection");
  assert(!(is_virtual && is_static), "either virtual, special, or static");

  enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };

  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_val = new(C) PhiNode(result_reg, TypeInt::INT);
  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
  Node* obj = NULL;
  if (!is_static) {
    // Check for hashing null object
    obj = null_check_receiver();
    if (stopped())  return true;        // unconditionally null
    result_reg->init_req(_null_path, top());
    result_val->init_req(_null_path, top());
  } else {
    // Do a null check, and return zero if null.
    // System.identityHashCode(null) == 0
    obj = argument(0);
    Node* null_ctl = top();
    obj = null_check_oop(obj, &null_ctl);
    result_reg->init_req(_null_path, null_ctl);
    result_val->init_req(_null_path, _gvn.intcon(0));
  }

  // Unconditionally null?  Then return right away.
  if (stopped()) {
    set_control( result_reg->in(_null_path));
    if (!stopped())
      set_result(result_val->in(_null_path));
    return true;
  }

  // We only go to the fast case code if we pass a number of guards.  The
  // paths which do not pass are accumulated in the slow_region.
  RegionNode* slow_region = new (C) RegionNode(1);
  record_for_igvn(slow_region);

  // If this is a virtual call, we generate a funny guard.  We pull out
  // the vtable entry corresponding to hashCode() from the target object.
  // If the target method which we are calling happens to be the native
  // Object hashCode() method, we pass the guard.  We do not need this
  // guard for non-virtual calls -- the caller is known to be the native
  // Object hashCode().
  if (is_virtual) {
    // After null check, get the object's klass.
    Node* obj_klass = load_object_klass(obj);
    generate_virtual_guard(obj_klass, slow_region);
  }

  // Get the header out of the object, use LoadMarkNode when available
  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  // The control of the load must be NULL. Otherwise, the load can move before
  // the null check after castPP removal.
  Node* no_ctrl = NULL;
  Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);

  // Test the header to see if it is unlocked.
  Node* lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
  Node* lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
  Node* unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
  Node* chk_unlocked   = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val));
  Node* test_unlocked  = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne));

  generate_slow_guard(test_unlocked, slow_region);

  // Get the hash value and check to see that it has been properly assigned.
  // We depend on hash_mask being at most 32 bits and avoid the use of
  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
  // vm: see markOop.hpp.
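  // For reference, the 64-bit mark word layout for a normal object is
  // (see markOop.hpp):
  //   unused:25 | hash:31 | unused:1 | age:4 | biased_lock:1 | lock:2
  // so shifting by hash_shift and masking with hash_mask extracts the
  // identity hash, or no_hash if one has not been assigned yet.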
  Node* hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
  Node* hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
  Node* hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift));
  // This hack lets the hash bits live anywhere in the mark object now, as long
  // as the shift drops the relevant bits into the low 32 bits.  Note that
  // Java spec says that HashCode is an int so there's no point in capturing
  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
  hshifted_header      = ConvX2I(hshifted_header);
  Node* hash_val       = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask));

  Node* no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
  Node* chk_assigned   = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val));
  Node* test_assigned  = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq));

  generate_slow_guard(test_assigned, slow_region);

  Node* init_mem = reset_memory();
  // fill in the rest of the null path:
  result_io ->init_req(_null_path, i_o());
  result_mem->init_req(_null_path, init_mem);

  result_val->init_req(_fast_path, hash_val);
  result_reg->init_req(_fast_path, control());
  result_io ->init_req(_fast_path, i_o());
  result_mem->init_req(_fast_path, init_mem);

  // Generate code for the slow case.  We make a call to hashCode().
  set_control(_gvn.transform(slow_region));
  if (!stopped()) {
    // No need for PreserveJVMState, because we're using up the present state.
    set_all_memory(init_mem);
    vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
    CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
    Node* slow_result = set_results_for_java_call(slow_call);
    // this->control() comes from set_results_for_java_call
    result_reg->init_req(_slow_path, control());
    result_val->init_req(_slow_path, slow_result);
    result_io  ->set_req(_slow_path, i_o());
    result_mem ->set_req(_slow_path, reset_memory());
  }

  // Return the combined state.
  set_i_o(        _gvn.transform(result_io)  );
  set_all_memory( _gvn.transform(result_mem));

  set_result(result_reg, result_val);
  return true;
}

//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
  Node* obj = null_check_receiver();
  if (stopped())  return true;
  set_result(load_mirror_from_klass(load_object_klass(obj)));
  return true;
}

//-----------------inline_native_Reflection_getCallerClass---------------------
// public static native Class<?> sun.reflect.Reflection.getCallerClass();
//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
// NOTE: This code must perform the same logic as JVM_GetCallerClass
// in that it must skip particular security frames and checks for
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
  }
#endif

  if (!jvms()->has_method()) {
#ifndef PRODUCT
    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
      tty->print_cr("  Bailing out because intrinsic was inlined at top level");
    }
#endif
    return false;
  }

  // Walk back up the JVM state to find the caller at the required
  // depth.
  JVMState* caller_jvms = jvms();

  // Cf. JVM_GetCallerClass
  // NOTE: Start the loop at depth 1 because the current JVM state does
  // not include the Reflection.getCallerClass() frame.
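  // Illustrative stack, as seen by this loop:
  //   1: the @CallerSensitive method that called getCallerClass (e.g. Class.forName)
  //   2: its caller, whose holder class is the result (reflection machinery
  //      frames are skipped via is_ignored_by_security_stack_walk).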
  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
    ciMethod* m = caller_jvms->method();
    switch (n) {
    case 0:
      fatal("current JVM state does not include the Reflection.getCallerClass frame");
      break;
    case 1:
      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
      if (!m->caller_sensitive()) {
#ifndef PRODUCT
        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
        }
#endif
        return false;  // bail-out; let JVM_GetCallerClass do the work
      }
      break;
    default:
      if (!m->is_ignored_by_security_stack_walk()) {
        // We have reached the desired frame; return the holder class.
        // Acquire method holder as java.lang.Class and push as constant.
        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
        ciInstance* caller_mirror = caller_klass->java_mirror();
        set_result(makecon(TypeInstPtr::make(caller_mirror)));

#ifndef PRODUCT
        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
          tty->print_cr("  JVM state at this point:");
          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
            ciMethod* m = jvms()->of_depth(i)->method();
            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
          }
        }
#endif
        return true;
      }
      break;
    }
  }

#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
    tty->print_cr("  JVM state at this point:");
    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
      ciMethod* m = jvms()->of_depth(i)->method();
      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
    }
  }
#endif

  return false;  // bail-out; let JVM_GetCallerClass do the work
}

bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
  Node* arg = argument(0);
  Node* result = NULL;

  switch (id) {
  case vmIntrinsics::_floatToRawIntBits:    result = new (C) MoveF2INode(arg);  break;
  case vmIntrinsics::_intBitsToFloat:       result = new (C) MoveI2FNode(arg);  break;
  case vmIntrinsics::_doubleToRawLongBits:  result = new (C) MoveD2LNode(arg);  break;
  case vmIntrinsics::_longBitsToDouble:     result = new (C) MoveL2DNode(arg);  break;
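  // Unlike the raw variants above, doubleToLongBits and floatToIntBits must
  // collapse all NaN inputs to one canonical NaN bit pattern, so each needs
  // a branch on the self-comparison (arg != arg), which is true only for NaN.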

  case vmIntrinsics::_doubleToLongBits: {
    // two paths (plus control) merge in a wood
    RegionNode *r = new (C) RegionNode(3);
    Node *phi = new (C) PhiNode(r, TypeLong::LONG);

    Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
    // Build the boolean node
    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));

    // Branch either way.
    // NaN case is less traveled, which makes all the difference.
    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    Node *opt_isnan = _gvn.transform(ifisnan);
    assert( opt_isnan->is_If(), "Expect an IfNode");
    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));

    set_control(iftrue);

    static const jlong nan_bits = CONST64(0x7ff8000000000000);
    Node *slow_result = longcon(nan_bits); // return NaN
    phi->init_req(1, _gvn.transform( slow_result ));
    r->init_req(1, iftrue);

    // Else fall through
    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
    set_control(iffalse);

    phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
    r->init_req(2, iffalse);

    // Post merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    result = phi;
    assert(result->bottom_type()->isa_long(), "must be");
    break;
  }

  case vmIntrinsics::_floatToIntBits: {
    // two paths (plus control) merge in a wood
    RegionNode *r = new (C) RegionNode(3);
    Node *phi = new (C) PhiNode(r, TypeInt::INT);

    Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
    // Build the boolean node
    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));

    // Branch either way.
    // NaN case is less traveled, which makes all the difference.
    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
    Node *opt_isnan = _gvn.transform(ifisnan);
    assert( opt_isnan->is_If(), "Expect an IfNode");
    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));

    set_control(iftrue);

    static const jint nan_bits = 0x7fc00000;
    Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
    phi->init_req(1, _gvn.transform( slow_result ));
    r->init_req(1, iftrue);

    // Else fall through
    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
    set_control(iffalse);

    phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
    r->init_req(2, iffalse);

    // Post merge
    set_control(_gvn.transform(r));
    record_for_igvn(r);

    C->set_has_split_ifs(true); // Has chance for split-if optimization
    result = phi;
    assert(result->bottom_type()->isa_int(), "must be");
    break;
  }

  default:
    fatal_unexpected_iid(id);
    break;
  }
  set_result(_gvn.transform(result));
  return true;
}

#ifdef _LP64
#define XTOP ,top() /*additional argument*/
#else  //_LP64
#define XTOP        /*no additional argument*/
#endif //_LP64
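// On 64-bit, XTOP supplies the second (high) half argument slot that a
// long-sized value occupies in a runtime-call signature; on 32-bit the
// value fits in one slot and no filler is needed.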

//----------------------inline_unsafe_copyMemory-------------------------
// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
bool LibraryCallKit::inline_unsafe_copyMemory() {
  if (callee()->is_static())  return false;  // caller must have the capability!
  null_check_receiver();  // null-check receiver
  if (stopped())  return true;

  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".

  Node* src_ptr =         argument(1);   // type: oop
  Node* src_off = ConvL2X(argument(2));  // type: long
  Node* dst_ptr =         argument(4);   // type: oop
  Node* dst_off = ConvL2X(argument(5));  // type: long
  Node* size    = ConvL2X(argument(7));  // type: long
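  // The skipped slots (3, 6, 8) are the high halves of the preceding long
  // arguments: the receiver sits in slot 0 and each long occupies two slots.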

  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
         "fieldOffset must be byte-scaled");

  Node* src = make_unsafe_address(src_ptr, src_off);
  Node* dst = make_unsafe_address(dst_ptr, dst_off);

  // Conservatively insert a memory barrier on all memory slices.
  // Do not let writes of the copy source or destination float below the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  // Call it.  Note that the length argument is not scaled.
  make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::fast_arraycopy_Type(),
                    StubRoutines::unsafe_arraycopy(),
                    "unsafe_arraycopy",
                    TypeRawPtr::BOTTOM,
                    src, dst, size XTOP);

  // Do not let reads of the copy destination float above the copy.
  insert_mem_bar(Op_MemBarCPUOrder);

  return true;
}

//------------------------copy_to_clone----------------------------------
// Helper function for inline_native_clone.
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
  assert(obj_size != NULL, "");
  Node* raw_obj = alloc_obj->in(1);
  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");

  AllocateNode* alloc = NULL;
  if (ReduceBulkZeroing) {
    // We will be completely responsible for initializing this object -
    // mark Initialize node as complete.
    alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
    // The object was just allocated - there should not be any stores!
    guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
    // Mark as complete_with_arraycopy so that on AllocateNode
    // expansion, we know this AllocateNode is initialized by an array
    // copy and a StoreStore barrier exists after the array copy.
    alloc->initialization()->set_complete_with_arraycopy();
  }

  // Copy the fastest available way.
  // TODO: generate fields copies for small objects instead.
  Node* src  = obj;
  Node* dest = alloc_obj;
  Node* size = _gvn.transform(obj_size);

  // Exclude the header but include the array length to copy in 8-byte words.
  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
                            instanceOopDesc::base_offset_in_bytes();
  // base_off:
  // 8  - 32-bit VM
  // 12 - 64-bit VM, compressed klass
  // 16 - 64-bit VM, normal klass
  if (base_off % BytesPerLong != 0) {
    assert(UseCompressedClassPointers, "");
    if (is_array) {
      // Exclude the length to copy in 8-byte words.
      base_off += sizeof(int);
    } else {
      // Include the klass to copy in 8-byte words.
      base_off = instanceOopDesc::klass_offset_in_bytes();
    }
    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
  }
  src  = basic_plus_adr(src,  base_off);
  dest = basic_plus_adr(dest, base_off);

  // Compute the length also, if needed:
  Node* countx = size;
  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
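  // E.g., copying a 24-byte instance with base_off == 16 gives
  // countx == (24 - 16) >> LogBytesPerLong == 1 long word to copy.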

  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  bool disjoint_bases = true;
  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
                               src, NULL, dest, NULL, countx,
                               /*dest_uninitialized*/true);

  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
  if (card_mark) {
    assert(!is_array, "");
    // Put in store barrier for any and all oops we are sticking
    // into this object.  (We could avoid this if we could prove
    // that the object type contains no oop fields at all.)
    Node* no_particular_value = NULL;
    Node* no_particular_field = NULL;
    int raw_adr_idx = Compile::AliasIdxRaw;
    post_barrier(control(),
                 memory(raw_adr_type),
                 alloc_obj,
                 no_particular_field,
                 raw_adr_idx,
                 no_particular_value,
                 T_OBJECT,
                 false);
  }

  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  } else {
    insert_mem_bar(Op_MemBarCPUOrder);
  }
}

//------------------------inline_native_clone----------------------------
// protected native Object java.lang.Object.clone();
//
// Here are the simple edge cases:
//  null receiver => normal trap
//  virtual and clone was overridden => slow path to out-of-line clone
//  not cloneable or finalizer => slow path to out-of-line Object.clone
//
// The general case has two steps, allocation and copying.
// Allocation has two cases, and uses GraphKit::new_instance or new_array.
//
// Copying also has two cases, oop arrays and everything else.
// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
// Everything else uses the tight inline loop supplied by CopyArrayNode.
//
// These steps fold up nicely if and when the cloned object's klass
// can be sharply typed as an object array, a type array, or an instance.
//
bool LibraryCallKit::inline_native_clone(bool is_virtual) {
  PhiNode* result_val;

  // Set the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes Object.clone if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* obj = null_check_receiver();
    if (stopped())  return true;

    Node* obj_klass = load_object_klass(obj);
    const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
    const TypeOopPtr*   toop   = ((tklass != NULL)
                                ? tklass->as_instance_type()
                                : TypeInstPtr::NOTNULL);

    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the original float below the clone.
    insert_mem_bar(Op_MemBarCPUOrder);

    // paths into result_reg:
    enum {
      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
      _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
      _array_path,        // plain array allocation, plus arrayof_long_arraycopy
      _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
      PATH_LIMIT
    };
    RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
    result_val             = new(C) PhiNode(result_reg,
                                            TypeInstPtr::NOTNULL);
    PhiNode*    result_i_o = new(C) PhiNode(result_reg, Type::ABIO);
    PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
                                            TypePtr::BOTTOM);
    record_for_igvn(result_reg);

    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
    int raw_adr_idx = Compile::AliasIdxRaw;

    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
    if (array_ctl != NULL) {
      // It's an array.
      PreserveJVMState pjvms(this);
      set_control(array_ctl);
      Node* obj_length = load_array_length(obj);
      Node* obj_size  = NULL;
      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push

      if (!use_ReduceInitialCardMarks()) {
        // If it is an oop array, it requires very special treatment,
        // because card marking is required on each card of the array.
        Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
        if (is_obja != NULL) {
          PreserveJVMState pjvms2(this);
          set_control(is_obja);
          // Generate a direct call to the right arraycopy function(s).
          bool disjoint_bases = true;
          bool length_never_negative = true;
          generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
                             obj, intcon(0), alloc_obj, intcon(0),
                             obj_length,
                             disjoint_bases, length_never_negative);
          result_reg->init_req(_objArray_path, control());
          result_val->init_req(_objArray_path, alloc_obj);
          result_i_o ->set_req(_objArray_path, i_o());
          result_mem ->set_req(_objArray_path, reset_memory());
        }
      }
      // Otherwise, there are no card marks to worry about.
      // (We can dispense with card marks if we know the allocation
      //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
      //  causes the non-eden paths to take compensating steps to
      //  simulate a fresh allocation, so that no further
      //  card marks are required in compiled code to initialize
      //  the object.)

      if (!stopped()) {
        copy_to_clone(obj, alloc_obj, obj_size, true, false);

        // Present the results of the copy.
        result_reg->init_req(_array_path, control());
        result_val->init_req(_array_path, alloc_obj);
        result_i_o ->set_req(_array_path, i_o());
        result_mem ->set_req(_array_path, reset_memory());
      }
    }

    // We only go to the instance fast case code if we pass a number of guards.
    // The paths which do not pass are accumulated in the slow_region.
    RegionNode* slow_region = new (C) RegionNode(1);
    record_for_igvn(slow_region);
    if (!stopped()) {
      // It's an instance (we did array above).  Make the slow-path tests.
      // If this is a virtual call, we generate a funny guard.  We grab
      // the vtable entry corresponding to clone() from the target object.
      // If the target method which we are calling happens to be the
      // Object clone() method, we pass the guard.  We do not need this
      // guard for non-virtual calls; the caller is known to be the native
      // Object clone().
      if (is_virtual) {
        generate_virtual_guard(obj_klass, slow_region);
      }

      // The object must be cloneable and must not have a finalizer.
      // Both of these conditions may be checked in a single test.
      // We could optimize the cloneable test further, but we don't care.
      generate_access_flags_guard(obj_klass,
                                  // Test both conditions:
                                  JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
                                  // Must be cloneable but not finalizer:
                                  JVM_ACC_IS_CLONEABLE,
                                  slow_region);
    }

    if (!stopped()) {
      // It's an instance, and it passed the slow-path tests.
      PreserveJVMState pjvms(this);
      Node* obj_size  = NULL;
      // Need to deoptimize on exception from allocation since Object.clone intrinsic
      // is reexecuted if deoptimization occurs and there could be problems when merging
      // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
      Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);

      copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());

      // Present the results of the slow call.
      result_reg->init_req(_instance_path, control());
      result_val->init_req(_instance_path, alloc_obj);
      result_i_o ->set_req(_instance_path, i_o());
      result_mem ->set_req(_instance_path, reset_memory());
    }

    // Generate code for the slow case.  We make a call to clone().
    set_control(_gvn.transform(slow_region));
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
      Node* slow_result = set_results_for_java_call(slow_call);
      // this->control() comes from set_results_for_java_call
      result_reg->init_req(_slow_path, control());
      result_val->init_req(_slow_path, slow_result);
      result_i_o ->set_req(_slow_path, i_o());
      result_mem ->set_req(_slow_path, reset_memory());
    }

    // Return the combined state.
    set_control(    _gvn.transform(result_reg));
    set_i_o(        _gvn.transform(result_i_o));
    set_all_memory( _gvn.transform(result_mem));
  } // original reexecute is set back here

  set_result(_gvn.transform(result_val));
  return true;
}

//------------------------------basictype2arraycopy----------------------------
address LibraryCallKit::basictype2arraycopy(BasicType t,
                                            Node* src_offset,
                                            Node* dest_offset,
                                            bool disjoint_bases,
                                            const char* &name,
                                            bool dest_uninitialized) {
  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // If the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint).  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint, since a low->high copy will work correctly in this case.
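  // E.g., a within-array copy from s_offs == 4 to d_offs == 2 is safe for a
  // low->high copy, so it may use the disjoint stub.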
  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != NULL) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}


//------------------------------inline_arraycopy-----------------------
// public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
//                                                      Object dest, int destPos,
//                                                      int length);
bool LibraryCallKit::inline_arraycopy() {
  // Get the arguments.
  Node* src         = argument(0);  // type: oop
  Node* src_offset  = argument(1);  // type: int
  Node* dest        = argument(2);  // type: oop
  Node* dest_offset = argument(3);  // type: int
  Node* length      = argument(4);  // type: int

  // Compile time checks.  If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call.  Instead, we let the call remain as it
  // is.  The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type  = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src  = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  // Do we have the type of src?
  bool has_src = (top_src != NULL && top_src->klass() != NULL);
  // Do we have the type of dest?
  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
  // Is the type for src from speculation?
  bool src_spec = false;
  // Is the type for dest from speculation?
  bool dest_spec = false;

  if (!has_src || !has_dest) {
    // We don't have sufficient type information, let's see if
    // speculative types can help. We need to have types for both src
    // and dest so that it pays off.

    // Do we already have or could we have type information for src
    bool could_have_src = has_src;
    // Do we already have or could we have type information for dest
    bool could_have_dest = has_dest;

    ciKlass* src_k = NULL;
    if (!has_src) {
      src_k = src_type->speculative_type();
      if (src_k != NULL && src_k->is_array_klass()) {
        could_have_src = true;
      }
    }

    ciKlass* dest_k = NULL;
    if (!has_dest) {
      dest_k = dest_type->speculative_type();
      if (dest_k != NULL && dest_k->is_array_klass()) {
        could_have_dest = true;
      }
    }

    if (could_have_src && could_have_dest) {
      // This is going to pay off so emit the required guards
      if (!has_src) {
        src = maybe_cast_profiled_obj(src, src_k);
        src_type  = _gvn.type(src);
        top_src  = src_type->isa_aryptr();
        has_src = (top_src != NULL && top_src->klass() != NULL);
        src_spec = true;
      }
      if (!has_dest) {
        dest = maybe_cast_profiled_obj(dest, dest_k);
        dest_type  = _gvn.type(dest);
        top_dest  = dest_type->isa_aryptr();
        has_dest = (top_dest != NULL && top_dest->klass() != NULL);
        dest_spec = true;
      }
    }
  }

  if (!has_src || !has_dest) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    insert_mem_bar(Op_MemBarCPUOrder);

    // Call StubRoutines::generic_arraycopy stub.
    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
                       src, src_offset, dest, dest_offset, length);

    // Do not let reads from the destination float above the arraycopy.
    // Since we cannot type the arrays, we don't know which slices
    // might be affected.  We could restrict this barrier only to those
    // memory slices which pertain to array elements--but don't bother.
    if (!InsertMemBarAfterArraycopy)
      // (If InsertMemBarAfterArraycopy, there is already one in place.)
      insert_mem_bar(Op_MemBarCPUOrder);
    return true;
  }

  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;

  if (src_elem != dest_elem || dest_elem == T_VOID) {
    // The component types are not the same or are not recognized.  Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    generate_slow_arraycopy(TypePtr::BOTTOM,
                            src, src_offset, dest, dest_offset, length,
                            /*dest_uninitialized*/false);
    return true;
  }
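
  // For example, System.arraycopy(new int[8], 0, new long[8], 0, 8) has
  // src_elem == T_INT and dest_elem == T_LONG, so it takes the branch above
  // and lands in the slow path, which throws ArrayStoreException at runtime.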

  if (src_elem == T_OBJECT) {
    // If both arrays are object arrays then having the exact types
    // for both will remove the need for a subtype check at runtime
    // before the call and may make it possible to pick a faster copy
    // routine (without a subtype check on every element)
    // Do we have the exact type of src?
    bool could_have_src = src_spec;
    // Do we have the exact type of dest?
    bool could_have_dest = dest_spec;
    ciKlass* src_k = top_src->klass();
    ciKlass* dest_k = top_dest->klass();
    if (!src_spec) {
      src_k = src_type->speculative_type();
      if (src_k != NULL && src_k->is_array_klass()) {
          could_have_src = true;
      }
    }
    if (!dest_spec) {
      dest_k = dest_type->speculative_type();
      if (dest_k != NULL && dest_k->is_array_klass()) {
        could_have_dest = true;
      }
    }
    if (could_have_src && could_have_dest) {
      // If we can have both exact types, emit the missing guards
      if (could_have_src && !src_spec) {
        src = maybe_cast_profiled_obj(src, src_k);
      }
      if (could_have_dest && !dest_spec) {
        dest = maybe_cast_profiled_obj(dest, dest_k);
      }
    }
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  RegionNode* slow_region = new (C) RegionNode(1);
  record_for_igvn(slow_region);

  // (3) operands must not be null
  // We currently perform our null checks with the null_check routine.
  // This means that the null exceptions will be reported in the caller
  // rather than (correctly) reported inside of the native arraycopy call.
  // This should be corrected, given time.  We do our null check with the
  // stack pointer restored.
  src  = null_check(src,  T_ARRAY);
  dest = null_check(dest, T_ARRAY);

  // (4) src_offset must not be negative.
  generate_negative_guard(src_offset, slow_region);

  // (5) dest_offset must not be negative.
  generate_negative_guard(dest_offset, slow_region);

  // (6) length must not be negative (moved to generate_arraycopy()).
  // generate_negative_guard(length, slow_region);

  // (7) src_offset + length must not exceed length of src.
  generate_limit_guard(src_offset, length,
                       load_array_length(src),
                       slow_region);

  // (8) dest_offset + length must not exceed length of dest.
  generate_limit_guard(dest_offset, length,
                       load_array_length(dest),
                       slow_region);

  // (9) each element of an oop array must be assignable
  // The generate_arraycopy subroutine checks this.

  // This is where the memory effects are placed:
  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  generate_arraycopy(adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     false, false, slow_region);

  return true;
}

//-----------------------------generate_arraycopy----------------------
// Generate an optimized call to arraycopy.
// Caller must guard against non-arrays.
// Caller must determine a common array basic-type for both arrays.
// Caller must validate offsets against array bounds.
// The slow_region has already collected guard failure paths
// (such as out of bounds length or non-conformable array types).
// The generated code has this shape, in general:
//
//     if (length == 0)  return   // via zero_path
//     slowval = -1
//     if (types unknown) {
//       slowval = call generic copy loop
//       if (slowval == 0)  return  // via checked_path
//     } else if (indexes in bounds) {
//       if ((is object array) && !(array type check)) {
//         slowval = call checked copy loop
//         if (slowval == 0)  return  // via checked_path
//       } else {
//         call bulk copy loop
//         return  // via fast_path
//       }
//     }
//     // adjust params for remaining work:
//     if (slowval != -1) {
//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
//     }
//   slow_region:
//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
//     return  // via slow_call_path
//
// This routine is used from several intrinsics:  System.arraycopy,
// Object.clone (the array subcase), and Arrays.copyOf[Range].
//
void
LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
                                   BasicType basic_elem_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length,
                                   bool disjoint_bases,
                                   bool length_never_negative,
                                   RegionNode* slow_region) {

  if (slow_region == NULL) {
    slow_region = new(C) RegionNode(1);
    record_for_igvn(slow_region);
  }

  Node* original_dest      = dest;
  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
  bool  dest_uninitialized = false;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
  if (ReduceBulkZeroing
      && !ZeroTLAB              // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && ((alloc = tightly_coupled_allocation(dest, slow_region))
          != NULL)
      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
      && alloc->maybe_set_complete(&_gvn)) {
    // "You break it, you buy it."
    InitializeNode* init = alloc->initialization();
    assert(init->is_complete(), "we just did this");
    init->set_complete_with_arraycopy();
    assert(dest->is_CheckCastPP(), "sanity");
    assert(dest->in(0)->in(0) == init, "dest pinned");
    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
    // From this point on, every exit path is responsible for
    // initializing any non-copied parts of the object to zero.
    // Also, if this flag is set we make sure that arraycopy interacts properly
    // with G1, eliding pre-barriers. See CR 6627983.
    dest_uninitialized = true;
  } else {
    // No zeroing elimination here.
    alloc             = NULL;
    //original_dest   = dest;
    //dest_uninitialized = false;
  }

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
  RegionNode* result_region = new(C) RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new(C) PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new(C) PhiNode(result_region, Type::MEMORY, adr_type);
  record_for_igvn(result_region);
  _gvn.set_type_bottom(result_i_o);
  _gvn.set_type_bottom(result_memory);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = i_o();
  Node* slow_mem = memory(adr_type);
  debug_only(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = NULL;
  Node* checked_i_o     = NULL;
  Node* checked_value   = NULL;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_uninitialized, "");
    Node* cv = generate_generic_arraycopy(adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, dest_uninitialized);
    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
    checked_control = control();
    checked_i_o     = i_o();
    checked_mem     = memory(adr_type);
    checked_value   = cv;
    set_control(top());         // no fast path
  }

  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
  if (not_pos != NULL) {
    PreserveJVMState pjvms(this);
    set_control(not_pos);

    // (6) length must not be negative.
    if (!length_never_negative) {
      generate_negative_guard(copy_length, slow_region);
    }

    // copy_length is 0.
    if (!stopped() && dest_uninitialized) {
      Node* dest_length = alloc->in(AllocateNode::ALength);
      if (copy_length->eqv_uncast(dest_length)
          || _gvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(adr_type, dest, basic_elem_type,
                             intcon(0), NULL,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
                                                       Compile::AliasIdxRaw,
                                                       top())->as_Initialize();
        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
    result_region->init_req(zero_path, control());
    result_i_o   ->init_req(zero_path, i_o());
    result_memory->init_req(zero_path, memory(adr_type));
  }

  if (!stopped() && dest_uninitialized) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
    Node* dest_length = alloc->in(AllocateNode::ALength);
    Node* dest_tail   = _gvn.transform(new(C) AddINode(dest_offset,
                                                       copy_length));

    // If there is a head section that needs zeroing, do it now.
    if (find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(adr_type, dest, basic_elem_type,
                           intcon(0), dest_offset,
                           NULL);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = NULL;
    if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt   = _gvn.transform(new(C) CmpINode(dest_tail, dest_length));
      Node* bol_lt   = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt));
      tail_ctl = generate_slow_guard(bol_lt, NULL);
      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
      bool didit = false;
      { PreserveJVMState pjvms(this);
        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, dest_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, control());
          result_i_o   ->init_req(bcopy_path, i_o());
          result_memory->init_req(bcopy_path, memory(adr_type));
        }
      }
      if (didit)
        set_control(top());     // no regular fast path
    }

    // Clear the tail, if any.
    if (tail_ctl != NULL) {
      Node* notail_ctl = stopped() ? NULL : control();
      set_control(tail_ctl);
      if (notail_ctl == NULL) {
        generate_clear_array(adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
      } else {
        // Make a local merge.
        Node* done_ctl = new(C) RegionNode(3);
        Node* done_mem = new(C) PhiNode(done_ctl, Type::MEMORY, adr_type);
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, memory(adr_type));
        generate_clear_array(adr_type, dest, basic_elem_type,
                             dest_tail, NULL,
                             dest_size);
        done_ctl->init_req(2, control());
        done_mem->init_req(2, memory(adr_type));
        set_control( _gvn.transform(done_ctl));
        set_memory(  _gvn.transform(done_mem), adr_type );
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!stopped() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // Get the Klass* for both src and dest
    Node* src_klass  = load_object_klass(src);
    Node* dest_klass = load_object_klass(dest);

    // Generate the subtype check.
    // This might fold up statically, or then again it might not.
    //
    // Non-static example:  Copying List<String>.elements to a new String[].
    // The backing store for a List<String> is always an Object[],
    // but its elements are always type String, if the generic types
    // are correct at the source level.
    //
    // Test S[] against D[], not S against D, because (probably)
    // the secondary supertype cache is less busy for S[] than S.
    // This usually only matters when D is an interface.
    Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
    // Plug failing path into checked_oop_disjoint_arraycopy
    if (not_subtype_ctrl != top()) {
      PreserveJVMState pjvms(this);
      set_control(not_subtype_ctrl);
      // (At this point we can assume disjoint_bases, since types differ.)
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
      Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
      Node* dest_elem_klass = _gvn.transform(n1);
      Node* cv = generate_checkcast_arraycopy(adr_type,
                                              dest_elem_klass,
                                              src, src_offset, dest, dest_offset,
                                              ConvI2X(copy_length), dest_uninitialized);
      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
      checked_control = control();
      checked_i_o     = i_o();
      checked_mem     = memory(adr_type);
      checked_value   = cv;
    }
    // At this point we know we do not need type checks on oop stores.

    // Let's see if we need card marks:
    if (alloc != NULL && use_ReduceInitialCardMarks()) {
      // If we do not need card marks, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  if (!stopped()) {
    // Generate the fast path, if possible.
    PreserveJVMState pjvms(this);
    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
                                 src, src_offset, dest, dest_offset,
                                 ConvI2X(copy_length), dest_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, control());
    result_i_o   ->init_req(fast_path, i_o());
    result_memory->init_req(fast_path, memory(adr_type));
  }

  // Here are all the slow paths up to this point, in one bundle:
  slow_control = top();
  if (slow_region != NULL)
    slow_control = _gvn.transform(slow_region);
  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);

  set_control(checked_control);
  if (!stopped()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
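    // (Example: if 5 elements were transferred before a failing element
    // check, the stub returned -1^5 == -6; XOR-ing with -1 below recovers
    // the count 5 so the slow path can resume where the copy stopped.)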
    Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0)));
    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff));
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
    set_control( _gvn.transform(new(C) IfFalseNode(iff) ));
    RegionNode* slow_reg2 = new(C) RegionNode(3);
    PhiNode*    slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
    record_for_igvn(slow_reg2);
    slow_reg2  ->init_req(1, slow_control);
    slow_i_o2  ->init_req(1, slow_i_o);
    slow_mem2  ->init_req(1, slow_mem);
    slow_reg2  ->init_req(2, control());
    slow_i_o2  ->init_req(2, checked_i_o);
    slow_mem2  ->init_req(2, checked_mem);

    slow_control = _gvn.transform(slow_reg2);
    slow_i_o     = _gvn.transform(slow_i_o2);
    slow_mem     = _gvn.transform(slow_mem2);

    if (alloc != NULL) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
      Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1)));
      Node* slow_offset    = new(C) PhiNode(slow_reg2, TypeInt::INT);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);
      slow_offset  = _gvn.transform(slow_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = _gvn.transform(new(C) AddINode(src_offset,  slow_offset));
      Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset));
      Node* length_minus  = _gvn.transform(new(C) SubINode(copy_length, slow_offset));

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }

  set_control(slow_control);
  if (!stopped()) {
    // Generate the slow path, if needed.
    PreserveJVMState pjvms(this);   // replace_in_map may trash the map

    set_memory(slow_mem, adr_type);
    set_i_o(slow_i_o);

    if (dest_uninitialized) {
      generate_clear_array(adr_type, dest, basic_elem_type,
                           intcon(0), NULL,
                           alloc->in(AllocateNode::AllocSize));
    }

    generate_slow_arraycopy(adr_type,
                            src, src_offset, dest, dest_offset,
                            copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, control());
    result_i_o   ->init_req(slow_call_path, i_o());
    result_memory->init_req(slow_call_path, memory(adr_type));
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == NULL)
      result_region->init_req(i, top());
  }

  // Finished; return the combined state.
  set_control( _gvn.transform(result_region));
  set_i_o(     _gvn.transform(result_i_o)    );
  set_memory(  _gvn.transform(result_memory), adr_type );

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.  Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by arraycopy calls, it causes register spilling on the stack.
  // See 6544710.  The next memory barrier is added to avoid that.  If the
  // arraycopy can be optimized away (which it sometimes can), then we can
  // manually remove the membar as well.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != NULL) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    // Record what AllocateNode this StoreStore protects so that
    // escape analysis can go from the MemBarStoreStoreNode to the
    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
    // based on the escape status of the AllocateNode.
    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
  } else if (InsertMemBarAfterArraycopy)
    insert_mem_bar(Op_MemBarCPUOrder);
}


// Helper function which determines if an arraycopy immediately follows
// an allocation, with no intervening tests or other escapes for the object.
AllocateArrayNode*
LibraryCallKit::tightly_coupled_allocation(Node* ptr,
                                           RegionNode* slow_region) {
  if (stopped())             return NULL;  // no fast path
  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around

  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
  if (alloc == NULL)  return NULL;

  Node* rawmem = memory(Compile::AliasIdxRaw);
  // Is the allocation's memory state untouched?
  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
    // Bail out if there have been raw-memory effects since the allocation.
    // (Example:  There might have been a call or safepoint.)
    return NULL;
  }
  rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
  if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
    return NULL;
  }

  // There must be no unexpected observers of this allocation.
  for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
    Node* obs = ptr->fast_out(i);
    if (obs != this->map()) {
      return NULL;
    }
  }

  // This arraycopy must unconditionally follow the allocation of the ptr.
  Node* alloc_ctl = ptr->in(0);
  assert(just_allocated_object(alloc_ctl) == ptr, "most recent allocation");

  Node* ctl = control();
  while (ctl != alloc_ctl) {
    // There may be guards which feed into the slow_region.
    // Any other control flow means that we might not get a chance
    // to finish initializing the allocated object.
    if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
      IfNode* iff = ctl->in(0)->as_If();
      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
      assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
      if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
        ctl = iff->in(0);       // This test feeds the known slow_region.
        continue;
      }
      // One more try:  Various low-level checks bottom out in
      // uncommon traps.  If the debug-info of the trap omits
      // any reference to the allocation, as we've already
      // observed, then there can be no objection to the trap.
      bool found_trap = false;
      for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
        Node* obs = not_ctl->fast_out(j);
        if (obs->in(0) == not_ctl && obs->is_Call() &&
            (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
          found_trap = true; break;
        }
      }
      if (found_trap) {
        ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
        continue;
      }
    }
    return NULL;
  }

  // If we get this far, we have an allocation which immediately
  // precedes the arraycopy, and we can take over zeroing the new object.
  // The arraycopy will finish the initialization, and provide
  // a new control state to which we will anchor the destination pointer.

  return alloc;
}

// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type           memory slice where writes are generated
//   dest               oop of the destination array
//   basic_elem_type    element type of the destination
//   slice_idx          array index of first element to store
//   slice_len          number of elements to store (or NULL)
//   dest_size          total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-NULL.
// If dest_size is non-NULL, zeroing extends to the end of the object.
// If slice_len is non-NULL, the slice_idx value must be a constant.
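//
// Worked example (assuming a 16-byte array header): clearing a byte[]
// (scale == 0) from slice_idx == 5 for slice_len == 6 rounds the start
// offset (16 + 5) down to the jint boundary 20 and the end offset
// (16 + 11) up to the jlong boundary 32; the extra "slop" bytes are safe
// to zero because the object is otherwise uninitialized.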
void
LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
                                     Node* dest,
                                     BasicType basic_elem_type,
                                     Node* slice_idx,
                                     Node* slice_len,
                                     Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
  if (slice_len == NULL)  slice_len = top();
  if (dest_size == NULL)  dest_size = top();

  Node* mem = memory(adr_type); // memory slice to operate on

  // scaling and rounding of indexes:
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end_con, &_gvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end, &_gvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base  = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
    Node*    end       = ConvI2X(slice_len);
    if (scale != 0)
      end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) ));
    end_base += end_round;
    end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base)));
    end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round)));
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start_con, end, &_gvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) ));
    start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase)));
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit)));
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
        Node* p1 = basic_plus_adr(dest, x1);
        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
        mem = _gvn.transform(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
                                       start, end, &_gvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  set_memory(mem, adr_type);
}


bool
LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
                                         BasicType basic_elem_type,
                                         AllocateNode* alloc,
                                         Node* src,  Node* src_offset,
                                         Node* dest, Node* dest_offset,
                                         Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false;               // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0)
    // At present, we can only understand constants.
    return false;

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);
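
  // Illustrative arithmetic (assuming abase == 16): an int[] copy
  // (scale == 2) with src_off_con == dest_off_con == 1 gives byte offsets
  // of 20, which are 4 mod 8; the code below then peels off one leading
  // jint so the remaining copy starts on a 64-bit boundary.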

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src,  src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      const TypePtr* s_adr_type = _gvn.type(sptr)->is_ptr();
      assert(s_adr_type->isa_aryptr(), "impossible slice");
      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, s_adr_type, MemNode::unordered);
      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");

  // Do this copy by giant steps.
  Node* sptr  = basic_plus_adr(src,  src_off);
  Node* dptr  = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off)));
  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong)));

  bool disjoint_bases = true;   // since alloc != NULL
  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);

  return true;
}


// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
void
LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
                                        Node* src,  Node* src_offset,
                                        Node* dest, Node* dest_offset,
                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");
  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
                                 OptoRuntime::slow_arraycopy_Type(),
                                 OptoRuntime::slow_arraycopy_Java(),
                                 "slow_arraycopy", adr_type,
                                 src, src_offset, dest, dest_offset,
                                 copy_length);

  // Handle exceptions thrown by this fellow:
  make_slow_call_ex(call, env()->Throwable_klass(), false);
}

// Helper function; generates code for cases requiring runtime checks.
Node*
LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
                                             Node* dest_elem_klass,
                                             Node* src,  Node* src_offset,
                                             Node* dest, Node* dest_offset,
                                             Node* copy_length, bool dest_uninitialized) {
  if (stopped())  return NULL;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
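  // (Roughly, the stub's per-element fast check is
  //    *(element_klass + check_offset) == check_value,
  // with a full subtype scan only on a miss.)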
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
  Node* check_offset = ConvI2X(_gvn.transform(n3));
  Node* check_value  = dest_elem_klass;

  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  // (We know the arrays are never conjoint, because their types differ.)
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::checkcast_arraycopy_Type(),
                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
                                 // five arguments, of which two are
                                 // intptr_t (jlong in LP64)
                                 src_start, dest_start,
                                 copy_length XTOP,
                                 check_offset XTOP,
                                 check_value);

  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
}


// Helper function; generates code for cases requiring runtime checks.
Node*
LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
                                           Node* src,  Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");
  if (stopped())  return NULL;
  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
    return NULL;
  }

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::generic_arraycopy_Type(),
                    copyfunc_addr, "generic_arraycopy", adr_type,
                    src, src_offset, dest, dest_offset, copy_length);

  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
void
LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
                                             BasicType basic_elem_type,
                                             bool disjoint_bases,
                                             Node* src,  Node* src_offset,
                                             Node* dest, Node* dest_offset,
                                             Node* copy_length, bool dest_uninitialized) {
  if (stopped())  return;               // nothing to do

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                          disjoint_bases, copyfunc_name, dest_uninitialized);

  // Call it.  Note that the count_ix value is not scaled to a byte-size.
  make_runtime_call(RC_LEAF|RC_NO_FP,
                    OptoRuntime::fast_arraycopy_Type(),
                    copyfunc_addr, copyfunc_name, adr_type,
                    src_start, dest_start, copy_length XTOP);
}

//-------------inline_encodeISOArray-----------------------------------
// encode char[] to byte[] in ISO_8859_1
bool LibraryCallKit::inline_encodeISOArray() {
  assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
  // no receiver since it is static method
  Node *src         = argument(0);
  Node *src_offset  = argument(1);
  Node *dst         = argument(2);
  Node *dst_offset  = argument(3);
  Node *length      = argument(4);

  const Type* src_type = src->Value(&_gvn);
  const Type* dst_type = dst->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dst_type->isa_aryptr();
  if (top_src  == NULL || top_src->klass()  == NULL ||
      top_dest == NULL || top_dest->klass() == NULL) {
    // failed array check
    return false;
  }

  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_CHAR || dst_elem != T_BYTE) {
    return false;
  }
  Node* src_start = array_element_address(src, src_offset, src_elem);
  Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
  // 'src_start' points to src array + scaled offset
  // 'dst_start' points to dst array + scaled offset

  const TypeAryPtr* mtype = TypeAryPtr::BYTES;
  Node* enc = new (C) EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
  enc = _gvn.transform(enc);
  Node* res_mem = _gvn.transform(new (C) SCMemProjNode(enc));
  set_memory(res_mem, mtype);
  set_result(enc);
  return true;
}

//-------------inline_multiplyToLen-----------------------------------
bool LibraryCallKit::inline_multiplyToLen() {
  assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::multiplyToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "multiplyToLen";

  assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");

  // no receiver because it is a static method
  Node* x    = argument(0);
  Node* xlen = argument(1);
  Node* y    = argument(2);
  Node* ylen = argument(3);
  Node* z    = argument(4);

  const Type* x_type = x->Value(&_gvn);
  const Type* y_type = y->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_y = y_type->isa_aryptr();
  if (top_x  == NULL || top_x->klass()  == NULL ||
      top_y == NULL || top_y->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || y_elem != T_INT) {
    return false;
  }

  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
  // on the return from z array allocation in runtime.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    Node* x_start = array_element_address(x, intcon(0), x_elem);
    Node* y_start = array_element_address(y, intcon(0), y_elem);
    // 'x_start' points to the first element of the x array
    // 'y_start' points to the first element of the y array

    // Allocate the result array
    Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen));
    ciKlass* klass = ciTypeArrayKlass::make(T_INT);
    Node* klass_node = makecon(TypeKlassPtr::make(klass));
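
    // Decide whether the caller-supplied z can hold zlen == xlen + ylen
    // ints; if z is null or too short, a fresh int[zlen] is allocated
    // instead (the IdealKit block below builds that control flow).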

    IdealKit ideal(this);

#define __ ideal.
     Node* one = __ ConI(1);
     Node* zero = __ ConI(0);
     IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
     __ set(need_alloc, zero);
     __ set(z_alloc, z);
     __ if_then(z, BoolTest::eq, null()); {
       __ increment (need_alloc, one);
     } __ else_(); {
       // Update graphKit memory and control from IdealKit.
       sync_kit(ideal);
       Node* zlen_arg = load_array_length(z);
       // Update IdealKit memory and control from graphKit.
       __ sync_kit(this);
       __ if_then(zlen_arg, BoolTest::lt, zlen); {
         __ increment (need_alloc, one);
       } __ end_if();
     } __ end_if();

     __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
       // Update graphKit memory and control from IdealKit.
       sync_kit(ideal);
       Node * narr = new_array(klass_node, zlen, 1);
       // Update IdealKit memory and control from graphKit.
       __ sync_kit(this);
       __ set(z_alloc, narr);
     } __ end_if();

     sync_kit(ideal);
     z = __ value(z_alloc);
     // Can't use TypeAryPtr::INTS which uses Bottom offset.
     _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
     // Final sync IdealKit and GraphKit.
     final_sync(ideal);
#undef __

    Node* z_start = array_element_address(z, intcon(0), T_INT);

    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                   OptoRuntime::multiplyToLen_Type(),
                                   stubAddr, stubName, TypePtr::BOTTOM,
                                   x_start, xlen, y_start, ylen, z_start, zlen);
  } // original reexecute is set back here

  C->set_has_split_ifs(true); // Has chance for split-if optimization
  set_result(z);
  return true;
}

//-------------inline_squareToLen------------------------------------
bool LibraryCallKit::inline_squareToLen() {
  assert(UseSquareToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::squareToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "squareToLen";

  assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");

  Node* x    = argument(0);
  Node* len  = argument(1);
  Node* z    = argument(2);
  Node* zlen = argument(3);

  const Type* x_type = x->Value(&_gvn);
  const Type* z_type = z->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_z = z_type->isa_aryptr();
  if (top_x  == NULL || top_x->klass()  == NULL ||
      top_z  == NULL || top_z->klass()  == NULL) {
    // failed array check
    return false;
  }

  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || z_elem != T_INT) {
    return false;
  }


  Node* x_start = array_element_address(x, intcon(0), x_elem);
  Node* z_start = array_element_address(z, intcon(0), z_elem);

  Node*  call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                  OptoRuntime::squareToLen_Type(),
                                  stubAddr, stubName, TypePtr::BOTTOM,
                                  x_start, len, z_start, zlen);

  set_result(z);
  return true;
}

//-------------inline_mulAdd------------------------------------------
bool LibraryCallKit::inline_mulAdd() {
  assert(UseMulAddIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::mulAdd();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "mulAdd";

  assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");

  Node* out      = argument(0);
  Node* in       = argument(1);
  Node* offset   = argument(2);
  Node* len      = argument(3);
  Node* k        = argument(4);

  const Type* out_type = out->Value(&_gvn);
  const Type* in_type = in->Value(&_gvn);
  const TypeAryPtr* top_out = out_type->isa_aryptr();
  const TypeAryPtr* top_in = in_type->isa_aryptr();
  if (top_out  == NULL || top_out->klass()  == NULL ||
      top_in == NULL || top_in->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (out_elem != T_INT || in_elem != T_INT) {
    return false;
  }

  Node* outlen = load_array_length(out);
  Node* new_offset = _gvn.transform(new (C) SubINode(outlen, offset));
  Node* out_start = array_element_address(out, intcon(0), out_elem);
  Node* in_start = array_element_address(in, intcon(0), in_elem);

  Node*  call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                  OptoRuntime::mulAdd_Type(),
                                  stubAddr, stubName, TypePtr::BOTTOM,
                                  out_start, in_start, new_offset, len, k);
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

//-------------inline_montgomeryMultiply-----------------------------------
bool LibraryCallKit::inline_montgomeryMultiply() {
  address stubAddr = StubRoutines::montgomeryMultiply();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }

  assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
  const char* stubName = "montgomery_multiply";

  assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");

  Node* a    = argument(0);
  Node* b    = argument(1);
  Node* n    = argument(2);
  Node* len  = argument(3);
  Node* inv  = argument(4);
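  // 'inv' is a Java long, so it occupies two argument slots (4 and 5);
  // the product array therefore arrives in slot 6.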
  Node* m    = argument(6);

  const Type* a_type = a->Value(&_gvn);
  const TypeAryPtr* top_a = a_type->isa_aryptr();
  const Type* b_type = b->Value(&_gvn);
  const TypeAryPtr* top_b = b_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a  == NULL || top_a->klass()  == NULL ||
      top_b == NULL || top_b->klass()  == NULL ||
      top_n == NULL || top_n->klass()  == NULL ||
      top_m == NULL || top_m->klass()  == NULL) {
    // failed array check
    return false;
  }

  BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
    return false;
  }

  // Make the call
  {
    Node* a_start = array_element_address(a, intcon(0), a_elem);
    Node* b_start = array_element_address(b, intcon(0), b_elem);
    Node* n_start = array_element_address(n, intcon(0), n_elem);
    Node* m_start = array_element_address(m, intcon(0), m_elem);

    Node* call = NULL;
    if (CCallingConventionRequiresIntsAsLongs) {
      Node* len_I2L = ConvI2L(len);
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomeryMultiply_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, b_start, n_start, len_I2L XTOP, inv,
                               top(), m_start);
    } else {
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomeryMultiply_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, b_start, n_start, len, inv, top(),
                               m_start);
    }
    set_result(m);
  }

  return true;
}

bool LibraryCallKit::inline_montgomerySquare() {
  address stubAddr = StubRoutines::montgomerySquare();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }

  assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
  const char* stubName = "montgomery_square";

  assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");

  Node* a    = argument(0);
  Node* n    = argument(1);
  Node* len  = argument(2);
  Node* inv  = argument(3);
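  // As above, 'inv' is a Java long occupying slots 3 and 4, so the
  // product array arrives in slot 5.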
  Node* m    = argument(5);

  const Type* a_type = a->Value(&_gvn);
  const TypeAryPtr* top_a = a_type->isa_aryptr();
  const Type* n_type = n->Value(&_gvn);
  const TypeAryPtr* top_n = n_type->isa_aryptr();
  const Type* m_type = m->Value(&_gvn);
  const TypeAryPtr* top_m = m_type->isa_aryptr();
  if (top_a  == NULL || top_a->klass()  == NULL ||
      top_n == NULL || top_n->klass()  == NULL ||
      top_m == NULL || top_m->klass()  == NULL) {
    // failed array check
    return false;
  }

  BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
    return false;
  }

  // Make the call
  {
    Node* a_start = array_element_address(a, intcon(0), a_elem);
    Node* n_start = array_element_address(n, intcon(0), n_elem);
    Node* m_start = array_element_address(m, intcon(0), m_elem);

    Node* call = NULL;
    if (CCallingConventionRequiresIntsAsLongs) {
      Node* len_I2L = ConvI2L(len);
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomerySquare_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, n_start, len_I2L XTOP, inv, top(),
                               m_start);
    } else {
      call = make_runtime_call(RC_LEAF,
                               OptoRuntime::montgomerySquare_Type(),
                               stubAddr, stubName, TypePtr::BOTTOM,
                               a_start, n_start, len, inv, top(),
                               m_start);
    }

    set_result(m);
  }

  return true;
}


/**
 * Calculate CRC32 for byte.
 * int java.util.zip.CRC32.update(int crc, int b)
 */
bool LibraryCallKit::inline_updateCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support");
  assert(callee()->signature()->size() == 2, "update has 2 parameters");
  // no receiver since it is a static method
  Node* crc  = argument(0); // type: int
  Node* b    = argument(1); // type: int

  /*
   *    int c = ~ crc;
   *    b = timesXtoThe32[(b ^ c) & 0xFF];
   *    b = b ^ (c >>> 8);
   *    crc = ~b;
   */

  Node* M1 = intcon(-1);
  crc = _gvn.transform(new (C) XorINode(crc, M1));
  Node* result = _gvn.transform(new (C) XorINode(crc, b));
  result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));

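  // Index the 256-entry CRC table: entries are 4-byte ints, so the shift
  // by 2 below scales the table index to a byte offset.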
  Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
  Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
  Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);

  crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
  result = _gvn.transform(new (C) XorINode(crc, result));
  result = _gvn.transform(new (C) XorINode(result, M1));
  set_result(result);
  return true;
}

/**
 * Calculate CRC32 for byte[] array.
 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
 */
bool LibraryCallKit::inline_updateBytesCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support");
  assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
  // no receiver since it is a static method
  Node* crc     = argument(0); // type: int
  Node* src     = argument(1); // type: oop
  Node* offset  = argument(2); // type: int
  Node* length  = argument(3); // type: int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src  == NULL || top_src->klass()  == NULL) {
    // failed array check
    return false;
  }

  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }

  // 'src_start' points to src array + scaled offset
  Node* src_start = array_element_address(src, offset, src_elem);

  // We assume that range check is done by caller.
  // TODO: generate range check (offset+length < src.length) in debug VM.

  // Call the stub.
  address stubAddr = StubRoutines::updateBytesCRC32();
  const char *stubName = "updateBytesCRC32";
  Node* call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc XTOP, src_start, length XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc, src_start, length);
  }
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

/**
 * Calculate CRC32 for ByteBuffer.
 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
bool LibraryCallKit::inline_updateByteBufferCRC32() {
  assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support");
  assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
  // no receiver since it is a static method
  Node* crc     = argument(0); // type: int
  Node* src     = argument(1); // type: long
  Node* offset  = argument(3); // type: int
  Node* length  = argument(4); // type: int

  src = ConvL2X(src);  // adjust Java long to machine word
  Node* base = _gvn.transform(new (C) CastX2PNode(src));
  offset = ConvI2X(offset);

  // 'src_start' points to src array + scaled offset
  Node* src_start = basic_plus_adr(top(), base, offset);

  // Call the stub.
  address stubAddr = StubRoutines::updateBytesCRC32();
  const char *stubName = "updateBytesCRC32";
  Node* call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc XTOP, src_start, length XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             crc, src_start, length);
  }
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

//----------------------------inline_reference_get----------------------------
// public T java.lang.ref.Reference.get();
bool LibraryCallKit::inline_reference_get() {
  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "should have already been set");

  // Get the argument:
  Node* reference_obj = null_check_receiver();
  if (stopped()) return true;

  Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);

  ciInstanceKlass* klass = env()->Object_klass();
  const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);

  Node* no_ctrl = NULL;
  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);

  // Use the pre-barrier to record the value in the referent field
  pre_barrier(false /* do_load */,
              control(),
              NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
              result /* pre_val */,
              T_OBJECT);
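  // (Descriptive note: with G1 this emits the SATB pre-barrier, so the referent
  // loaded here is logged for the concurrent marker even though this intrinsic
  // bypasses the Java-level Reference.get() code.)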

  // Add memory barrier to prevent commoning reads from this field
  // across safepoint since GC can change its value.
  insert_mem_bar(Op_MemBarCPUOrder);

  set_result(result);
  return true;
}


Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
                                              bool is_exact=true, bool is_static=false) {

  const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
  assert(tinst != NULL, "obj is null");
  assert(tinst->klass()->is_loaded(), "obj is not loaded");
  assert(!is_exact || tinst->klass_is_exact(), "klass not exact");

  ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
                                                                          ciSymbol::make(fieldTypeString),
                                                                          is_static);
  if (field == NULL) return (Node *) NULL;

  // The following code is copied from Parse::do_get_xxx():

  // Compute address and memory type.
  int offset  = field->offset_in_bytes();
  bool is_vol = field->is_volatile();
  ciType* field_klass = field->type();
  assert(field_klass->is_loaded(), "should be loaded");
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(fromObj, fromObj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;
  if (bt == T_OBJECT) {
    type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* leading_membar = NULL;
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
    leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
    mb->as_MemBar()->set_trailing_load();
  }
  return loadedField;
}
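
// Example use (taken from the AES intrinsics below):
//   Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I",
//                                                 /*is_exact*/ false);
// A NULL return means the expected field is absent (e.g. an older class file),
// so the caller can bail out of the intrinsic.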


//------------------------------inline_aescrypt_Block-----------------------
bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
  address stubAddr = NULL;
  const char *stubName;
  assert(UseAES, "need AES instruction support");

  switch(id) {
  case vmIntrinsics::_aescrypt_encryptBlock:
    stubAddr = StubRoutines::aescrypt_encryptBlock();
    stubName = "aescrypt_encryptBlock";
    break;
  case vmIntrinsics::_aescrypt_decryptBlock:
    stubAddr = StubRoutines::aescrypt_decryptBlock();
    stubName = "aescrypt_decryptBlock";
    break;
  default:
    break;
  }
  if (stubAddr == NULL) return false;

  Node* aescrypt_object = argument(0);
  Node* src             = argument(1);
  Node* src_offset      = argument(2);
  Node* dest            = argument(3);
  Node* dest_offset     = argument(4);

  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");

  // for the quick and dirty code we will skip all the checks.
  // we are just trying to get the call to be generated.
  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  T_BYTE);
    dest_start = array_element_address(dest, dest_offset, T_BYTE);
  }

  // now need to get the start of its expanded key array
  // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
  if (k_start == NULL) return false;

  if (Matcher::pass_original_key_for_aes()) {
    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
    // compatibility issues between Java key expansion and SPARC crypto instructions
    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
    if (original_k_start == NULL) return false;

    // Call the stub.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
                      stubAddr, stubName, TypePtr::BOTTOM,
                      src_start, dest_start, k_start, original_k_start);
  } else {
    // Call the stub.
    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
                      stubAddr, stubName, TypePtr::BOTTOM,
                      src_start, dest_start, k_start);
  }

  return true;
}

//------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
  address stubAddr = NULL;
  const char *stubName = NULL;

  assert(UseAES, "need AES instruction support");

  switch(id) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
    stubName = "cipherBlockChaining_encryptAESCrypt";
    break;
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
    stubName = "cipherBlockChaining_decryptAESCrypt";
    break;
  default:
    break;
  }
  if (stubAddr == NULL) return false;

  Node* cipherBlockChaining_object = argument(0);
  Node* src                        = argument(1);
  Node* src_offset                 = argument(2);
  Node* len                        = argument(3);
  Node* dest                       = argument(4);
  Node* dest_offset                = argument(5);

  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_gvn);
  const Type* dest_type = dest->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
  assert (top_src  != NULL && top_src->klass()  != NULL
          &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");

  // checks are the responsibility of the caller
  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != NULL || dest_offset != NULL) {
    assert(src_offset != NULL && dest_offset != NULL, "");
    src_start  = array_element_address(src,  src_offset,  T_BYTE);
    dest_start = array_element_address(dest, dest_offset, T_BYTE);
  }

  // If we are in this set of code, we "know" the embeddedCipher is an AESCrypt
  // object (because of the predicated logic executed earlier), so we cast it
  // here safely. Fetching the expanded key below requires a newer class file
  // that stores the key as little-endian ints; otherwise we revert to Java.

  Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
  if (embeddedCipherObj == NULL) return false;

  // cast it to what we know it will be at runtime
  const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
  assert(tinst != NULL, "CBC obj is null");
  assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");

  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
  aescrypt_object = _gvn.transform(aescrypt_object);

  // we need to get the start of the aescrypt_object's expanded key array
  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
  if (k_start == NULL) return false;

  // similarly, get the start address of the r vector
  Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
  if (objRvec == NULL) return false;
  Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);

  Node* cbcCrypt;
  if (Matcher::pass_original_key_for_aes()) {
    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
    // compatibility issues between Java key expansion and SPARC crypto instructions
    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
    if (original_k_start == NULL) return false;

    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, dest_start, k_start, r_start, len, original_k_start);
  } else {
    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, dest_start, k_start, r_start, len);
  }

  // return cipher length (int)
  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
  set_result(retvalue);
  return true;
}

//------------------------------get_key_start_from_aescrypt_object-----------------------
Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
#ifdef PPC64
  // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
  // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
  // However, ppc64 vncipher processes MixColumns and requires the same round keys as encryption.
  // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
  Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
  assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objSessionK == NULL) {
    return (Node *) NULL;
  }
  Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
#else
  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
#endif // PPC64
  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the K array
  Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
  return k_start;
}

//------------------------------get_original_key_start_from_aescrypt_object-----------------------
Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
  if (objAESCryptKey == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the lastKey array
  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
  return original_k_start;
}

//----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
// for encryption:
//    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
// for decryption:
//    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
//    note cipher==plain is more conservative than the original java code but that's OK
//
Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
  // The receiver was checked for NULL already.
  Node* objCBC = argument(0);

  // Load embeddedCipher field of CipherBlockChaining object.
  Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);

  // Get the AESCrypt klass for an instanceof check.
  // AESCrypt might not be loaded yet if some other SymmetricCipher got us to
  // this compile point; if loaded, it has the same class loader as the
  // CipherBlockChaining object.
  const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
  assert(tinst != NULL, "CBCobj is null");
  assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");

  // we want to do an instanceof comparison against the AESCrypt class
  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
  if (!klass_AESCrypt->is_loaded()) {
    // if AESCrypt is not even loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no regular fast path
    return ctrl;
  }
  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();

  Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
  Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
  Node* bool_instof  = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));

  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);

  // for encryption, we are done
  if (!decrypting)
    return instof_false;  // even if it is NULL

  // for decryption, we need to add a further check to avoid
  // taking the intrinsic path when cipher and plain are the same
  // see the original java code for why.
  RegionNode* region = new(C) RegionNode(3);
  region->init_req(1, instof_false);
  Node* src = argument(1);
  Node* dest = argument(4);
  Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
  Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
  Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
  region->init_req(2, src_dest_conjoint);

  record_for_igvn(region);
  return _gvn.transform(region);
}

//------------------------------inline_ghash_processBlocks
bool LibraryCallKit::inline_ghash_processBlocks() {
  address stubAddr;
  const char *stubName;
  assert(UseGHASHIntrinsics, "need GHASH intrinsics support");

  stubAddr = StubRoutines::ghash_processBlocks();
  stubName = "ghash_processBlocks";

  Node* data           = argument(0);
  Node* offset         = argument(1);
  Node* len            = argument(2);
  Node* state          = argument(3);
  Node* subkeyH        = argument(4);

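  // Layout note (a sketch): 'state' and 'subkeyH' each hold a 128-bit value
  // as a two-element long[]; the stub folds 'len' bytes of 'data' into
  // 'state' by multiplication with the hash subkey H in GF(2^128).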
  Node* state_start  = array_element_address(state, intcon(0), T_LONG);
  assert(state_start, "state is NULL");
  Node* subkeyH_start  = array_element_address(subkeyH, intcon(0), T_LONG);
  assert(subkeyH_start, "subkeyH is NULL");
  Node* data_start  = array_element_address(data, offset, T_BYTE);
  assert(data_start, "data is NULL");

  Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
                                  OptoRuntime::ghash_processBlocks_Type(),
                                  stubAddr, stubName, TypePtr::BOTTOM,
                                  state_start, subkeyH_start, data_start, len);
  return true;
}

//------------------------------inline_sha_implCompress-----------------------
//
// Calculate SHA (i.e., SHA-1) for single-block byte[] array.
// void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
//
// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
// void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
//
// Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
// void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
//
bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
  assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");

  Node* sha_obj = argument(0);
  Node* src     = argument(1); // type oop
  Node* ofs     = argument(2); // type int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src  == NULL || top_src->klass()  == NULL) {
    // failed array check
    return false;
  }
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }
  // 'src_start' points to src array + offset
  Node* src_start = array_element_address(src, ofs, src_elem);
  Node* state = NULL;
  address stubAddr;
  const char *stubName;

  switch(id) {
  case vmIntrinsics::_sha_implCompress:
    assert(UseSHA1Intrinsics, "need SHA1 instruction support");
    state = get_state_from_sha_object(sha_obj);
    stubAddr = StubRoutines::sha1_implCompress();
    stubName = "sha1_implCompress";
    break;
  case vmIntrinsics::_sha2_implCompress:
    assert(UseSHA256Intrinsics, "need SHA256 instruction support");
    state = get_state_from_sha_object(sha_obj);
    stubAddr = StubRoutines::sha256_implCompress();
    stubName = "sha256_implCompress";
    break;
  case vmIntrinsics::_sha5_implCompress:
    assert(UseSHA512Intrinsics, "need SHA512 instruction support");
    state = get_state_from_sha5_object(sha_obj);
    stubAddr = StubRoutines::sha512_implCompress();
    stubName = "sha512_implCompress";
    break;
  default:
    fatal_unexpected_iid(id);
    return false;
  }
  if (state == NULL) return false;

  // Call the stub.
  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 src_start, state);

  return true;
}

//------------------------------inline_digestBase_implCompressMB-----------------------
//
// Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
//
bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
         "need SHA1/SHA256/SHA512 instruction support");
  assert((uint)predicate < 3, "sanity");
  assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");

  Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
  Node* src            = argument(1); // byte[] array
  Node* ofs            = argument(2); // type int
  Node* limit          = argument(3); // type int

  const Type* src_type = src->Value(&_gvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  if (top_src  == NULL || top_src->klass()  == NULL) {
    // failed array check
    return false;
  }
  // Figure out the size and type of the elements we will be copying.
  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (src_elem != T_BYTE) {
    return false;
  }
  // 'src_start' points to src array + offset
  Node* src_start = array_element_address(src, ofs, src_elem);

  const char* klass_SHA_name = NULL;
  const char* stub_name = NULL;
  address     stub_addr = NULL;
  bool        long_state = false;

  switch (predicate) {
  case 0:
    if (UseSHA1Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA";
      stub_name = "sha1_implCompressMB";
      stub_addr = StubRoutines::sha1_implCompressMB();
    }
    break;
  case 1:
    if (UseSHA256Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA2";
      stub_name = "sha256_implCompressMB";
      stub_addr = StubRoutines::sha256_implCompressMB();
    }
    break;
  case 2:
    if (UseSHA512Intrinsics) {
      klass_SHA_name = "sun/security/provider/SHA5";
      stub_name = "sha512_implCompressMB";
      stub_addr = StubRoutines::sha512_implCompressMB();
      long_state = true;
    }
    break;
  default:
    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
  }
  if (klass_SHA_name != NULL) {
    // get DigestBase klass to lookup for SHA klass
    const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
    assert(tinst != NULL, "digestBase_obj is not instance???");
    assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");

    ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
    assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
    ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
    return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
  }
  return false;
}
//------------------------------inline_sha_implCompressMB-----------------------
bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
                                               bool long_state, address stubAddr, const char *stubName,
                                               Node* src_start, Node* ofs, Node* limit) {
  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
  const TypeOopPtr* xtype = aklass->as_instance_type();
  Node* sha_obj = new (C) CheckCastPPNode(control(), digestBase_obj, xtype);
  sha_obj = _gvn.transform(sha_obj);

  Node* state;
  if (long_state) {
    state = get_state_from_sha5_object(sha_obj);
  } else {
    state = get_state_from_sha_object(sha_obj);
  }
  if (state == NULL) return false;

  // Call the stub.
  Node *call;
  if (CCallingConventionRequiresIntsAsLongs) {
    call = make_runtime_call(RC_LEAF|RC_NO_FP,
                             OptoRuntime::digestBase_implCompressMB_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             src_start, state, ofs XTOP, limit XTOP);
  } else {
    call = make_runtime_call(RC_LEAF|RC_NO_FP,
                             OptoRuntime::digestBase_implCompressMB_Type(),
                             stubAddr, stubName, TypePtr::BOTTOM,
                             src_start, state, ofs, limit);
  }
  // return ofs (int)
  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
  set_result(result);

  return true;
}

//------------------------------get_state_from_sha_object-----------------------
Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
  assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
  if (sha_state == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the state array
  Node* state = array_element_address(sha_state, intcon(0), T_INT);
  return state;
}

//------------------------------get_state_from_sha5_object-----------------------
Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
  Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
  assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
  if (sha_state == NULL) return (Node *) NULL;

  // now have the array, need to get the start address of the state array
  Node* state = array_element_address(sha_state, intcon(0), T_LONG);
  return state;
}

//----------------------------inline_digestBase_implCompressMB_predicate----------------------------
// Return node representing slow path of predicate check.
// the pseudo code we want to emulate with this predicate is:
//    if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
//
Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
         "need SHA1/SHA256/SHA512 instruction support");
  assert((uint)predicate < 3, "sanity");

  // The receiver was checked for NULL already.
  Node* digestBaseObj = argument(0);

  // get DigestBase klass for instanceOf check
  const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
  assert(tinst != NULL, "digestBaseObj is null");
  assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");

  const char* klass_SHA_name = NULL;
  switch (predicate) {
  case 0:
    if (UseSHA1Intrinsics) {
      // we want to do an instanceof comparison against the SHA class
      klass_SHA_name = "sun/security/provider/SHA";
    }
    break;
  case 1:
    if (UseSHA256Intrinsics) {
      // we want to do an instanceof comparison against the SHA2 class
      klass_SHA_name = "sun/security/provider/SHA2";
    }
    break;
  case 2:
    if (UseSHA512Intrinsics) {
      // we want to do an instanceof comparison against the SHA5 class
      klass_SHA_name = "sun/security/provider/SHA5";
    }
    break;
  default:
    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
  }

  ciKlass* klass_SHA = NULL;
  if (klass_SHA_name != NULL) {
    klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
  }
  if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
    // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
    Node* ctrl = control();
    set_control(top()); // no intrinsic path
    return ctrl;
  }
  ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();

  Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
  Node* cmp_instof = _gvn.transform(new (C) CmpINode(instofSHA, intcon(1)));
  Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);

  return instof_false;  // even if it is NULL
}

bool LibraryCallKit::inline_profileBoolean() {
  Node* counts = argument(1);
  const TypeAryPtr* ary = NULL;
  ciArray* aobj = NULL;
  if (counts->is_Con()
      && (ary = counts->bottom_type()->isa_aryptr()) != NULL
      && (aobj = ary->const_oop()->as_array()) != NULL
      && (aobj->length() == 2)) {
    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
    jint false_cnt = aobj->element_value(0).as_int();
    jint  true_cnt = aobj->element_value(1).as_int();

    if (C->log() != NULL) {
      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
                     false_cnt, true_cnt);
    }

    if (false_cnt + true_cnt == 0) {
      // According to profile, never executed.
      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                          Deoptimization::Action_reinterpret);
      return true;
    }

    // result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
    // counts the occurrences of each value.
    Node* result = argument(0);
    if (false_cnt == 0 || true_cnt == 0) {
      // According to the profile, one value has never been seen.
      int expected_val = (false_cnt == 0) ? 1 : 0;

      Node* cmp  = _gvn.transform(new (C) CmpINode(result, intcon(expected_val)));
      Node* test = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));

      IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
      Node* fast_path = _gvn.transform(new (C) IfTrueNode(check));
      Node* slow_path = _gvn.transform(new (C) IfFalseNode(check));

      { // Slow path: uncommon trap for never seen value and then reexecute
        // MethodHandleImpl::profileBoolean() to bump the count, so JIT knows
        // the value has been seen at least once.
        PreserveJVMState pjvms(this);
        PreserveReexecuteState preexecs(this);
        jvms()->set_should_reexecute(true);

        set_control(slow_path);
        set_i_o(i_o());

        uncommon_trap_exact(Deoptimization::Reason_intrinsic,
                            Deoptimization::Action_reinterpret);
      }
      // The guard for the never-seen value enables sharpening of the result and
      // returning a constant. It allows branches on the same value to be
      // eliminated later on.
      set_control(fast_path);
      result = intcon(expected_val);
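      // e.g. if profiling has only ever observed 'true', then false_cnt == 0,
      // expected_val == 1, and the fast path yields the constant 1.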
    }
    // Stop profiling.
    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
    // By replacing method body with profile data (represented as ProfileBooleanNode
    // on IR level) we effectively disable profiling.
    // It enables full speed execution once optimized code is generated.
    Node* profile = _gvn.transform(new (C) ProfileBooleanNode(result, false_cnt, true_cnt));
    C->record_for_igvn(profile);
    set_result(profile);
    return true;
  } else {
    // Continue profiling.
    // Profile data isn't available at the moment, so execute the method's
    // bytecode version. Usually, when GWT LambdaForms are profiled it means that
    // a stand-alone nmethod is compiled and counters aren't available, since the
    // corresponding MethodHandle isn't a compile-time constant.
    return false;
  }
}
C:\hotspot-69087d08d473\src\share\vm/opto/live.cpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/live.hpp"
#include "opto/machnode.hpp"


// Compute live-in/live-out.  We use a totally incremental algorithm.  The LIVE
// problem is monotonic.  The steady-state solution looks like this: pull a
// block from the worklist.  It has a set of delta's - values which are newly
// live-in from the block.  Push these to the live-out sets of all predecessor
// blocks.  At each predecessor, the new live-out values are ANDed with what is
// already live-out (extra stuff is added to the live-out sets).  Then the
// remaining new live-out values are ANDed with what is locally defined.
// Leftover bits become the new live-in for the predecessor block, and the pred
// block is put on the worklist.
//   The locally live-in stuff is computed once and added to predecessor
// live-out sets.  This separate computation is done in the outer loop below.
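//   In classic dataflow terms this computes the fixpoint of (a sketch):
//     live-out(B) = union of live-in(S) over all successors S of B
//     live-in(B)  = use(B) | (live-out(B) - def(B))
//   where use(B) is the set of values read in B before any local definition.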
PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
}

void PhaseLive::compute(uint maxlrg) {
  _maxlrg   = maxlrg;
  _worklist = new (_arena) Block_List();

  // Init the sparse live arrays.  This data is live on exit from here!
  // The _live info is the live-out info.
  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
  uint i;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _live[i].initialize(_maxlrg);
  }

  // Init the sparse arrays for delta-sets.
  ResourceMark rm;              // Nuke temp storage on exit

  // Does the memory used by _defs and _deltas get reclaimed?  Does it matter?  TT

  // Array of values defined locally in blocks
  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks());
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _defs[i].initialize(_maxlrg);
  }

  // Array of delta-set pointers, indexed by block pre_order-1.
  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
  memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());

  _free_IndexSet = NULL;

  // Blocks having done pass-1
  VectorSet first_pass(Thread::current()->resource_area());

  // Outer loop: must compute local live-in sets and push into predecessors.
  for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
    Block* block = _cfg.get_block(j - 1);

    // Compute the local live-in set.  Start with any new live-out bits.
    IndexSet* use = getset(block);
    IndexSet* def = &_defs[block->_pre_order-1];
    DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
    uint i;
    for (i = block->number_of_nodes(); i > 1; i--) {
      Node* n = block->get_node(i-1);
      if (n->is_Phi()) {
        break;
      }

      uint r = _names.at(n->_idx);
      assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
      def->insert( r );
      use->remove( r );
      uint cnt = n->req();
      for (uint k = 1; k < cnt; k++) {
        Node *nk = n->in(k);
        uint nkidx = nk->_idx;
        if (_cfg.get_block_for_node(nk) != block) {
          uint u = _names.at(nkidx);
          use->insert(u);
          DEBUG_ONLY(def_outside->insert(u);)
        }
      }
    }
#ifdef ASSERT
    def_outside->set_next(_free_IndexSet);
    _free_IndexSet = def_outside;     // Drop onto free list
#endif
    // Remove anything defined by Phis and the block start instruction
    for (uint k = i; k > 0; k--) {
      uint r = _names.at(block->get_node(k - 1)->_idx);
      def->insert(r);
      use->remove(r);
    }

    // Push these live-in things to predecessors
    for (uint l = 1; l < block->num_preds(); l++) {
      Block* p = _cfg.get_block_for_node(block->pred(l));
      add_liveout(p, use, first_pass);

      // PhiNode uses go in the live-out set of prior blocks.
      for (uint k = i; k > 0; k--) {
        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
      }
    }
    freeset(block);
    first_pass.set(block->_pre_order);

    // Inner loop: blocks that picked up new live-out values to be propagated
    while (_worklist->size()) {
      Block* block = _worklist->pop();
      IndexSet *delta = getset(block);
      assert( delta->count(), "missing delta set" );

      // Add new-live-in to predecessors live-out sets
      for (uint l = 1; l < block->num_preds(); l++) {
        Block* predecessor = _cfg.get_block_for_node(block->pred(l));
        add_liveout(predecessor, delta, first_pass);
      }

      freeset(block);
    } // End of while-worklist-not-empty

  } // End of for-all-blocks-outer-loop

  // We explicitly clear all of the IndexSets which we are about to release.
  // This allows us to recycle their internal memory into IndexSet's free list.

  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    _defs[i].clear();
    if (_deltas[i]) {
      // Is this always true?
      _deltas[i]->clear();
    }
  }
  IndexSet *free = _free_IndexSet;
  while (free != NULL) {
    IndexSet *temp = free;
    free = free->next();
    temp->clear();
  }

}

#ifndef PRODUCT
void PhaseLive::stats(uint iters) const {
}
#endif

// Get an IndexSet for a block.  Return existing one, if any.  Make a new
// empty one if a prior one does not exist.
IndexSet *PhaseLive::getset( Block *p ) {
  IndexSet *delta = _deltas[p->_pre_order-1];
  if( !delta )                  // Not on worklist?
    // Get a free set; flag as being on worklist
    delta = _deltas[p->_pre_order-1] = getfreeset();
  return delta;                 // Return set of new live-out items
}

// Pull from free list, or allocate.  Internal allocation on the returned set
// is always from thread local storage.
IndexSet *PhaseLive::getfreeset( ) {
  IndexSet *f = _free_IndexSet;
  if( !f ) {
    f = new IndexSet;
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  } else {
    // Pull from free list
    _free_IndexSet = f->next();
  //f->_cnt = 0;                        // Reset to empty
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  }
  return f;
}

// Free an IndexSet from a block.
void PhaseLive::freeset( const Block *p ) {
  IndexSet *f = _deltas[p->_pre_order-1];
  f->set_next(_free_IndexSet);
  _free_IndexSet = f;           // Drop onto free list
  _deltas[p->_pre_order-1] = NULL;
}

// Add a live-out value to a given block's live-out set.  If it is new, then
// also add it to the delta set and stick the block on the worklist.
void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  if( live->insert(r) ) {       // If actually inserted...
    // We extended the live-out set.  See if the value is generated locally.
    // If it is not, then we must extend the live-in set.
    if( !_defs[p->_pre_order-1].member( r ) ) {
      if( !_deltas[p->_pre_order-1] && // Not on worklist?
          first_pass.test(p->_pre_order) )
        _worklist->push(p);     // Actually go on worklist if already 1st pass
      getset(p)->insert(r);
    }
  }
}

// Add a vector of live-out values to a given block's live-out set.
void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  IndexSet *defs = &_defs[p->_pre_order-1];
  IndexSet *on_worklist = _deltas[p->_pre_order-1];
  IndexSet *delta = on_worklist ? on_worklist : getfreeset();

  IndexSetIterator elements(lo);
  uint r;
  while ((r = elements.next()) != 0) {
    if( live->insert(r) &&      // If actually inserted...
        !defs->member( r ) )    // and not defined locally
      delta->insert(r);         // Then add to live-in set
  }

  if( delta->count() ) {                // If actually added things
    _deltas[p->_pre_order-1] = delta; // Flag as on worklist now
    if( !on_worklist &&         // Not on worklist?
        first_pass.test(p->_pre_order) )
      _worklist->push(p);       // Actually go on worklist if already 1st pass
  } else {                      // Nothing there; just free it
    delta->set_next(_free_IndexSet);
    _free_IndexSet = delta;     // Drop onto free list
  }
}

#ifndef PRODUCT
// Dump the live-out set for a block
void PhaseLive::dump( const Block *b ) const {
  tty->print("Block %d: ",b->_pre_order);
  tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
  uint cnt = b->number_of_nodes();
  for( uint i=0; i<cnt; i++ ) {
    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
    b->get_node(i)->dump();
  }
  tty->print("\n");
}

// Verify that base pointers and derived pointers are still sane.
void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#ifdef ASSERT
  Unique_Node_List worklist(a);
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    for (uint j = block->end_idx() + 1; j > 1; j--) {
      Node* n = block->get_node(j-1);
      if (n->is_Phi()) {
        break;
      }
      // Found a safepoint?
      if (n->is_MachSafePoint()) {
        MachSafePointNode *sfpt = n->as_MachSafePoint();
        JVMState* jvms = sfpt->jvms();
        if (jvms != NULL) {
          // Now scan for a live derived pointer
          if (jvms->oopoff() < sfpt->req()) {
            // Check each derived/base pair
            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
              Node *check = sfpt->in(idx);
              bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
              // search upwards through spills and spill phis for AddP
              worklist.clear();
              worklist.push(check);
              uint k = 0;
              while( k < worklist.size() ) {
                check = worklist.at(k);
                assert(check,"Bad base or derived pointer");
                // See PhaseChaitin::find_base_for_derived() for all cases.
                int isc = check->is_Copy();
                if( isc ) {
                  worklist.push(check->in(isc));
                } else if( check->is_Phi() ) {
                  for (uint m = 1; m < check->req(); m++)
                    worklist.push(check->in(m));
                } else if( check->is_Con() ) {
                  if (is_derived) {
                    // Derived is NULL+offset
                    assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
                  } else {
                    assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
                    // Base either ConP(NULL) or loadConP
                    if (check->is_Mach()) {
                      assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
                    } else {
                      assert(check->Opcode() == Op_ConP &&
                             check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
                    }
                  }
                } else if( check->bottom_type()->is_ptr()->_offset == 0 ) {
                  if (check->is_Proj() || (check->is_Mach() &&
                     (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
                      check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
                      check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
                      check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
                      (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
                      (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
                      (UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
#endif
                      check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                      check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
                    // Valid nodes
                  } else {
                    check->dump();
                    assert(false,"Bad base or derived pointer");
                  }
                } else {
                  assert(is_derived,"Bad base pointer");
                  assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
                }
                k++;
                assert(k < 100000,"Derived pointer checking in infinite loop");
              } // End while
            }
          } // End of check for derived pointers
        } // End of check for debug info
      } // End of if found a safepoint
    } // End of forall instructions in block
  } // End of forall blocks
#endif
}

// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
#ifdef ASSERT
  if( VerifyOpto || VerifyRegisterAllocator ) {
    _cfg.verify();
    verify_base_ptrs(a);
    if(verify_ifg)
      _ifg->verify(this);
  }
#endif
}

#endif
C:\hotspot-69087d08d473\src\share\vm/opto/live.hpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_LIVE_HPP
#define SHARE_VM_OPTO_LIVE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/block.hpp"
#include "opto/indexSet.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"

class Block;
class PhaseCFG;
class VectorSet;
class IndexSet;

//------------------------------LRG_List---------------------------------------
// Map Node indices to Live RanGe indices.
// Array lookup in the optimized case.
typedef GrowableArray<uint> LRG_List;

//------------------------------PhaseLive--------------------------------------
// Compute live-in/live-out
class PhaseLive : public Phase {
  // Array of Sets of values live at the start of a block.
  // Indexed by block pre-order number.
  IndexSet *_live;

  // Array of Sets of values defined locally in the block
  // Indexed by block pre-order number.
  IndexSet *_defs;

  // Array of delta-set pointers, indexed by block pre-order number
  IndexSet **_deltas;
  IndexSet *_free_IndexSet;     // Free list of same

  Block_List *_worklist;        // Worklist for iterative solution

  const PhaseCFG &_cfg;         // Basic blocks
  const LRG_List &_names;       // Mapping from Nodes to live ranges
  uint _maxlrg;                 // Largest live-range number
  Arena *_arena;

  IndexSet *getset( Block *p );
  IndexSet *getfreeset( );
  void freeset( const Block *p );
  void add_liveout( Block *p, uint r, VectorSet &first_pass );
  void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass );

public:
  PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena);
  ~PhaseLive() {}
  // Compute liveness info
  void compute(uint maxlrg);
  // Reset arena storage
  void reset() { _live = NULL; }

  // Return the live-out set for this block
  IndexSet *live( const Block * b ) { return &_live[b->_pre_order-1]; }

#ifndef PRODUCT
  void dump( const Block *b ) const;
  void stats(uint iters) const;
#endif
};
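
// Typical use (a sketch): the register allocator constructs PhaseLive over its
// CFG and node-to-live-range mapping, calls compute(maxlrg), and then queries
// live(b) per block while building the interference graph.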

#endif // SHARE_VM_OPTO_LIVE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/locknode.cpp
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLock node after RA, which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node merging
    // different cast nodes which all point to this locked object.
    // We assume that no other objects can be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}
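
// Illustrative example (added comment): a box whose only users are one
// Lock/Unlock pair (plus the matching FastLock) on the same object -- a
// single synchronized(obj) block -- is a simple lock region; a box reached
// by locks on two different objects is not, and we return false above.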

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
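
// Illustrative bytecode shape (added comment, assumption about typical javac
// output):
//   synchronized (obj) { ... }
// compiles to roughly
//   aload obj; dup; astore tmp; monitorenter; ...; aload tmp; monitorexit
// so do_monitor_enter() sees the duplicated oop on the stack: it null checks
// it, pops it, and emits the FastLock/Lock sequence via shared_lock().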

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
C:\hotspot-69087d08d473\src\share\vm/opto/locknode.hpp
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_LOCKNODE_HPP
#define SHARE_VM_OPTO_LOCKNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_aarch64
# include "adfiles/ad_aarch64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

//------------------------------BoxLockNode------------------------------------
class BoxLockNode : public Node {
  const int     _slot; // stack slot
  RegMask     _inmask; // OptoReg corresponding to stack slot
  bool _is_eliminated; // Associated locks were safely eliminated

public:
  BoxLockNode( int slot );
  virtual int Opcode() const;
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint size_of() const;
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;
  virtual const class Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
  virtual uint ideal_reg() const { return Op_RegP; }

  static OptoReg::Name reg(Node* box_node);
  static BoxLockNode* box_node(Node* box_node);
  static bool same_slot(Node* box1, Node* box2) {
    return box1->as_BoxLock()->_slot == box2->as_BoxLock()->_slot;
  }
  int stack_slot() const { return _slot; }

  bool is_eliminated() const { return _is_eliminated; }
  // mark lock as eliminated.
  void set_eliminated()      { _is_eliminated = true; }

  // Is BoxLock node used for one simple lock region?
  bool is_simple_lock_region(LockNode** unique_lock, Node* obj);

#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
  virtual void dump_spec(outputStream *st) const { st->print("  Lock %d",_slot); }
#endif
};

//------------------------------FastLockNode-----------------------------------
class FastLockNode: public CmpNode {
private:
  BiasedLockingCounters*        _counters;
  RTMLockingCounters*       _rtm_counters; // RTM lock counters for inflated locks
  RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks

public:
  FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
    init_req(0,ctrl);
    init_class_id(Class_FastLock);
    _counters = NULL;
    _rtm_counters = NULL;
    _stack_rtm_counters = NULL;
  }
  Node* obj_node() const { return in(1); }
  Node* box_node() const { return in(2); }
  void  set_box_node(Node* box) { set_req(2, box); }

  // FastLock and FastUnlockNode do not hash; we need one for each corresponding
  // LockNode/UnlockNode to avoid creating Phi's.
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint size_of() const;
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const { return TypeInt::CC; }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  void create_lock_counter(JVMState* s);
  void create_rtm_lock_counter(JVMState* state);
  BiasedLockingCounters*        counters() const { return _counters; }
  RTMLockingCounters*       rtm_counters() const { return _rtm_counters; }
  RTMLockingCounters* stack_rtm_counters() const { return _stack_rtm_counters; }
};


//------------------------------FastUnlockNode---------------------------------
class FastUnlockNode: public CmpNode {
public:
  FastUnlockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
    init_req(0,ctrl);
    init_class_id(Class_FastUnlock);
  }
  Node* obj_node() const { return in(1); }
  Node* box_node() const { return in(2); }


  // FastLock and FastUnlockNode do not hash; we need one for each corresponding
  // LockNode/UnlockNode to avoid creating Phi's.
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const { return TypeInt::CC; }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

};

#endif // SHARE_VM_OPTO_LOCKNODE_HPP
C:\hotspot-69087d08d473\src\share\vm/opto/loopnode.cpp
/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/superword.hpp"

//=============================================================================
//------------------------------is_loop_iv-------------------------------------
// Determine if a node is a counted-loop induction variable.
// The method is declared in node.hpp.
const Node* Node::is_loop_iv() const {
  if (this->is_Phi() && !this->as_Phi()->is_copy() &&
      this->as_Phi()->region()->is_CountedLoop() &&
      this->as_Phi()->region()->as_CountedLoop()->phi() == this) {
    return this;
  } else {
    return NULL;
  }
}

//=============================================================================
//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void LoopNode::dump_spec(outputStream *st) const {
  if (is_inner_loop()) st->print( "inner " );
  if (is_partial_peel_loop()) st->print( "partial_peel " );
  if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
}
#endif

//------------------------------is_valid_counted_loop-------------------------
bool LoopNode::is_valid_counted_loop() const {
  if (is_CountedLoop()) {
    CountedLoopNode*    l  = as_CountedLoop();
    CountedLoopEndNode* le = l->loopexit();
    if (le != NULL &&
        le->proj_out(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
      Node* phi  = l->phi();
      Node* exit = le->proj_out(0 /* false */);
      if (exit != NULL && exit->Opcode() == Op_IfFalse &&
          phi != NULL && phi->is_Phi() &&
          phi->in(LoopNode::LoopBackControl) == l->incr() &&
          le->loopnode() == l && le->stride_is_con()) {
        return true;
      }
    }
  }
  return false;
}

//------------------------------get_early_ctrl---------------------------------
// Compute earliest legal control
Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  uint i;
  Node *early;
  if (n->in(0) && !n->is_expensive()) {
    early = n->in(0);
    if (!early->is_CFG()) // Might be a non-CFG multi-def
      early = get_ctrl(early);        // So treat input as a straight data input
    i = 1;
  } else {
    early = get_ctrl(n->in(1));
    i = 2;
  }
  uint e_d = dom_depth(early);
  assert( early, "" );
  for (; i < n->req(); i++) {
    Node *cin = get_ctrl(n->in(i));
    assert( cin, "" );
    // Keep deepest dominator depth
    uint c_d = dom_depth(cin);
    if (c_d > e_d) {           // Deeper guy?
      early = cin;              // Keep deepest found so far
      e_d = c_d;
    } else if (c_d == e_d &&    // Same depth?
               early != cin) { // If not equal, must use slower algorithm
      // If same depth but not equal, one _must_ dominate the other
      // and we want the deeper (i.e., dominated) guy.
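      // Illustrative reading of the walk below (added comment): n1 climbs
      // from early, n2 climbs from cin, one idom step each per iteration.
      // If n1 reaches cin (or n2 climbs above cin's depth) then early is the
      // deeper, dominated node and is kept; if n2 reaches early (or n1
      // climbs above that depth) then cin is deeper and replaces early.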
      Node *n1 = early;
      Node *n2 = cin;
      while (1) {
        n1 = idom(n1);          // Walk up until break cycle
        n2 = idom(n2);
        if (n1 == cin ||        // Walked early up to cin
            dom_depth(n2) < c_d)
          break;                // early is deeper; keep him
        if (n2 == early ||      // Walked cin up to early
            dom_depth(n1) < c_d) {
          early = cin;          // cin is deeper; keep him
          break;
        }
      }
      e_d = dom_depth(early);   // Reset depth register cache
    }
  }

  // Return earliest legal location
  assert(early == find_non_split_ctrl(early), "unexpected early control");

  if (n->is_expensive()) {
    assert(n->in(0), "should have control input");
    early = get_early_ctrl_for_expensive(n, early);
  }

  return early;
}

//------------------------------get_early_ctrl_for_expensive---------------------------------
// Move node up the dominator tree as high as legal while still beneficial
Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
  assert(OptimizeExpensiveOps, "optimization off?");

  Node* ctl = n->in(0);
  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
  uint min_dom_depth = dom_depth(earliest);
#ifdef ASSERT
  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
  }
#endif
  if (dom_depth(ctl) < min_dom_depth) {
    return earliest;
  }

  while (1) {
    Node *next = ctl;
    // Moving the node out of a loop on the projection of an If
    // confuses loop predication. So once we hit a Loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
    if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
      if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
        break;
      }
      next = idom(ctl->in(1)->in(0));
    } else if (ctl->is_Proj()) {
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // i.e. an If with an UNC or the fall-through of a call.
      Node* parent_ctl = ctl->in(0);
      if (parent_ctl == NULL) {
        break;
      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
      } else if (parent_ctl->is_If()) {
        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
          break;
        }
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      } else if (ctl->is_CatchProj()) {
        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
          break;
        }
        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
        next = parent_ctl->in(0)->in(0)->in(0);
      } else {
        // Check if parent control has a single projection (this
        // control is the only possible successor of the parent
        // control). If so, we can try to move the node above the
        // parent control.
        int nb_ctl_proj = 0;
        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
          Node *p = parent_ctl->fast_out(i);
          if (p->is_Proj() && p->is_CFG()) {
            nb_ctl_proj++;
            if (nb_ctl_proj > 1) {
              break;
            }
          }
        }

        if (nb_ctl_proj > 1) {
          break;
        }
        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call(), "unexpected node");
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      }
    } else {
      next = idom(ctl);
    }
    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
      break;
    }
    ctl = next;
  }

  if (ctl != n->in(0)) {
    _igvn.hash_delete(n);
    n->set_req(0, ctl);
    _igvn.hash_insert(n);
  }

  return ctl;
}


//------------------------------set_early_ctrl---------------------------------
// Set earliest legal control
void PhaseIdealLoop::set_early_ctrl( Node *n ) {
  Node *early = get_early_ctrl(n);

  // Record earliest legal location
  set_ctrl(n, early);
}

//------------------------------set_subtree_ctrl-------------------------------
// set missing _ctrl entries on new nodes
void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
  // Already set?  Get out.
  if( _nodes[n->_idx] ) return;
  // Recursively set _nodes array to indicate where the Node goes
  uint i;
  for( i = 0; i < n->req(); ++i ) {
    Node *m = n->in(i);
    if( m && m != C->root() )
      set_subtree_ctrl( m );
  }

  // Fixup self
  set_early_ctrl( n );
}

//------------------------------is_counted_loop--------------------------------
bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) {
  PhaseGVN *gvn = &_igvn;

  // A counted loop head must be a good RegionNode with exactly 3 non-NULL
  // control input edges: Self, Entry, LoopBack.
  if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) {
    return false;
  }
  Node *init_control = x->in(LoopNode::EntryControl);
  Node *back_control = x->in(LoopNode::LoopBackControl);
  if (init_control == NULL || back_control == NULL)    // Partially dead
    return false;
  // Must also check for TOP when looking for a dead loop
  if (init_control->is_top() || back_control->is_top())
    return false;

  // Allow funny placement of Safepoint
  if (back_control->Opcode() == Op_SafePoint) {
    if (UseCountedLoopSafepoints) {
      // Leaving the safepoint on the backedge and creating a
      // CountedLoop will confuse optimizations. We can't move the
      // safepoint around because its jvm state wouldn't match a new
      // location. Give up on that loop.
      return false;
    }
    back_control = back_control->in(TypeFunc::Control);
  }

  // Controlling test for loop
  Node *iftrue = back_control;
  uint iftrue_op = iftrue->Opcode();
  if (iftrue_op != Op_IfTrue &&
      iftrue_op != Op_IfFalse)
    // I have a weird back-control.  Probably the loop-exit test is in
    // the middle of the loop and I am looking at some trailing control-flow
    // merge point.  To fix this I would have to partially peel the loop.
    return false; // Obscure back-control

  // Get boolean guarding loop-back test
  Node *iff = iftrue->in(0);
  if (get_loop(iff) != loop || !iff->in(1)->is_Bool())
    return false;
  BoolNode *test = iff->in(1)->as_Bool();
  BoolTest::mask bt = test->_test._test;
  float cl_prob = iff->as_If()->_prob;
  if (iftrue_op == Op_IfFalse) {
    bt = BoolTest(bt).negate();
    cl_prob = 1.0 - cl_prob;
  }
  // Get backedge compare
  Node *cmp = test->in(1);
  int cmp_op = cmp->Opcode();
  if (cmp_op != Op_CmpI)
    return false;                // Avoid pointer & float compares

  // Find the trip-counter increment & limit.  Limit must be loop invariant.
  Node *incr  = cmp->in(1);
  Node *limit = cmp->in(2);

  // ---------
  // need 'loop()' test to tell if limit is loop invariant
  // ---------

  if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
    Node *tmp = incr;            // Then reverse order into the CmpI
    incr = limit;
    limit = tmp;
    bt = BoolTest(bt).commute(); // And commute the exit test
  }
  if (is_member(loop, get_ctrl(limit))) // Limit must be loop-invariant
    return false;
  if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
    return false;

  Node* phi_incr = NULL;
  // Trip-counter increment must be commutative & associative.
  if (incr->is_Phi()) {
    if (incr->as_Phi()->region() != x || incr->req() != 3)
      return false; // Not simple trip counter expression
    phi_incr = incr;
    incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
    if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
      return false;
  }

  Node* trunc1 = NULL;
  Node* trunc2 = NULL;
  const TypeInt* iv_trunc_t = NULL;
  if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) {
    return false; // Funny increment opcode
  }
  assert(incr->Opcode() == Op_AddI, "wrong increment code");

  // Get merge point
  Node *xphi = incr->in(1);
  Node *stride = incr->in(2);
  if (!stride->is_Con()) {     // Oops, swap these
    if (!xphi->is_Con())       // Is the other guy a constant?
      return false;             // Nope, unknown stride, bail out
    Node *tmp = xphi;           // 'incr' is commutative, so ok to swap
    xphi = stride;
    stride = tmp;
  }
  // Stride must be constant
  int stride_con = stride->get_int();
  if (stride_con == 0)
    return false; // missed some peephole opt

  if (!xphi->is_Phi())
    return false; // Too much math on the trip counter
  if (phi_incr != NULL && phi_incr != xphi)
    return false;
  PhiNode *phi = xphi->as_Phi();

  // Phi must be of loop header; backedge must wrap to increment
  if (phi->region() != x)
    return false;
  if ((trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) ||
      (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) {
    return false;
  }
  Node *init_trip = phi->in(LoopNode::EntryControl);

  // If iv trunc type is smaller than int, check for possible wrap.
  if (!TypeInt::INT->higher_equal(iv_trunc_t)) {
    assert(trunc1 != NULL, "must have found some truncation");

    // Get a better type for the phi (filtered thru if's)
    const TypeInt* phi_ft = filtered_type(phi);

    // Can iv take on a value that will wrap?
    //
    // Ensure iv's limit is not within "stride" of the wrap value.
    //
    // Example for "short" type
    //    Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
    //    If the stride is +10, then the last value of the induction
    //    variable before the increment (phi_ft->_hi) must be
    //    <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
    //    ensure no truncation occurs after the increment.

    if (stride_con > 0) {
      if (iv_trunc_t->_hi - phi_ft->_hi < stride_con ||
          iv_trunc_t->_lo > phi_ft->_lo) {
        return false;  // truncation may occur
      }
    } else if (stride_con < 0) {
      if (iv_trunc_t->_lo - phi_ft->_lo > stride_con ||
          iv_trunc_t->_hi < phi_ft->_hi) {
        return false;  // truncation may occur
      }
    }
    // No possibility of wrap so truncation can be discarded
    // Promote iv type to Int
  } else {
    assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int");
  }

  // If the condition is inverted and we will be rolling
  // through MININT to MAXINT, then bail out.
  if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
      // Odd stride
      (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
      // Count down loop rolls through MAXINT
      ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
      // Count up loop rolls through MININT
      ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
    return false; // Bail out
  }

  const TypeInt* init_t = gvn->type(init_trip)->is_int();
  const TypeInt* limit_t = gvn->type(limit)->is_int();

  if (stride_con > 0) {
    jlong init_p = (jlong)init_t->_lo + stride_con;
    if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi)
      return false; // cyclic loop or this loop trips only once
  } else {
    jlong init_p = (jlong)init_t->_hi + stride_con;
    if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo)
      return false; // cyclic loop or this loop trips only once
  }

  if (phi_incr != NULL) {
    // Check if there is a possibility of the IV overflowing after the first increment.
    if (stride_con > 0) {
      if (init_t->_hi > max_jint - stride_con) {
        return false;
      }
    } else {
      if (init_t->_lo < min_jint - stride_con) {
        return false;
      }
    }
  }

  // =================================================
  // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
  //
  assert(x->Opcode() == Op_Loop, "regular loops only");
  C->print_method(PHASE_BEFORE_CLOOPS, 3);

  Node *hook = new (C) Node(6);

  if (LoopLimitCheck) {

  // ===================================================
  // Generate loop limit check to avoid integer overflow
  // in cases like next (cyclic loops):
  //
  // for (i=0; i <= max_jint; i++) {}
  // for (i=0; i <  max_jint; i+=2) {}
  //
  //
  // Limit check predicate depends on the loop test:
  //
  // for(;i != limit; i++)       --> limit <= (max_jint)
  // for(;i <  limit; i+=stride) --> limit <= (max_jint - stride + 1)
  // for(;i <= limit; i+=stride) --> limit <= (max_jint - stride    )
  //
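  //
  // Worked example (added, illustrative): stride_con = +2 with an "i < limit"
  // test gives stride_m = stride_con - 1 = 1, so the predicate requires
  // limit <= max_jint - 1.  A limit of max_jint would violate it: i could
  // reach max_jint - 1, increment to max_jint + 1 and wrap to min_jint,
  // producing a cyclic loop.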

  // Check if the limit is excluded, to allow a more precise int overflow check.
  bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
  int stride_m  = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));

  // If the compare points directly to the phi we need to adjust
  // the compare so that it points to the incr. The limit has
  // to be adjusted to keep the trip count the same, and the
  // adjusted limit should be checked for int overflow.
  if (phi_incr != NULL) {
    stride_m  += stride_con;
  }

  if (limit->is_Con()) {
    int limit_con = limit->get_int();
    if ((stride_con > 0 && limit_con > (max_jint - stride_m)) ||
        (stride_con < 0 && limit_con < (min_jint - stride_m))) {
      // Bailout: it could be integer overflow.
      return false;
    }
  } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) ||
             (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) {
      // Limit's type may satisfy the condition, for example,
      // when it is an array length.
  } else {
    // Generate loop's limit check.
    // Loop limit check predicate should be near the loop.
    ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
    if (!limit_check_proj) {
      // The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
      if (TraceLoopLimitCheck) {
        tty->print("missing loop limit check:");
        loop->dump_head();
        x->dump(1);
      }
#endif
      return false;
    }

    IfNode* check_iff = limit_check_proj->in(0)->as_If();
    Node* cmp_limit;
    Node* bol;

    if (stride_con > 0) {
      cmp_limit = new (C) CmpINode(limit, _igvn.intcon(max_jint - stride_m));
      bol = new (C) BoolNode(cmp_limit, BoolTest::le);
    } else {
      cmp_limit = new (C) CmpINode(limit, _igvn.intcon(min_jint - stride_m));
      bol = new (C) BoolNode(cmp_limit, BoolTest::ge);
    }
    cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
    bol = _igvn.register_new_node_with_optimizer(bol);
    set_subtree_ctrl(bol);

    // Replace condition in original predicate but preserve Opaque node
    // so that previous predicates could be found.
    assert(check_iff->in(1)->Opcode() == Op_Conv2B &&
           check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, "");
    Node* opq = check_iff->in(1)->in(1);
    _igvn.hash_delete(opq);
    opq->set_req(1, bol);
    // Update ctrl.
    set_ctrl(opq, check_iff->in(0));
    set_ctrl(check_iff->in(1), check_iff->in(0));

#ifndef PRODUCT
    // Report that loop predication has actually been performed
    // for this loop.
    if (TraceLoopLimitCheck) {
      tty->print_cr("Counted Loop Limit Check generated:");
      debug_only( bol->dump(2); )
    }
#endif
  }

  if (phi_incr != NULL) {
    // If the compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. The limit has
    // to be adjusted to keep the trip count the same while
    // avoiding int overflow.
    //
    //   i = init; do {} while(i++ < limit);
    // is converted to
    //   i = init; do {} while(++i < limit+1);
    //
    limit = gvn->transform(new (C) AddINode(limit, stride));
  }

  // Now we need to canonicalize the loop condition.
  if (bt == BoolTest::ne) {
    assert(stride_con == 1 || stride_con == -1, "simple increment only");
    // 'ne' can be replaced with 'lt' only when init < limit.
    if (stride_con > 0 && init_t->_hi < limit_t->_lo)
      bt = BoolTest::lt;
    // 'ne' can be replaced with 'gt' only when init > limit.
    if (stride_con < 0 && init_t->_lo > limit_t->_hi)
      bt = BoolTest::gt;
  }

  if (incl_limit) {
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
    //
    Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
    limit = gvn->transform(new (C) AddINode(limit, one));
    if (bt == BoolTest::le)
      bt = BoolTest::lt;
    else if (bt == BoolTest::ge)
      bt = BoolTest::gt;
    else
      ShouldNotReachHere();
  }
  set_subtree_ctrl( limit );

  } else { // LoopLimitCheck

  // If compare points to incr, we are ok.  Otherwise the compare
  // can directly point to the phi; in this case adjust the compare so that
  // it points to the incr by adjusting the limit.
  if (cmp->in(1) == phi || cmp->in(2) == phi)
    limit = gvn->transform(new (C) AddINode(limit,stride));

  // Trip count for a positive stride should be: (limit - init_trip + stride - 1)/stride.
  // Final value for iterator should be: trip_count * stride + init_trip.
  Node *one_p = gvn->intcon( 1);
  Node *one_m = gvn->intcon(-1);

  Node *trip_count = NULL;
  switch( bt ) {
  case BoolTest::eq:
    ShouldNotReachHere();
  case BoolTest::ne:            // Ahh, the case we desire
    if (stride_con == 1)
      trip_count = gvn->transform(new (C) SubINode(limit,init_trip));
    else if (stride_con == -1)
      trip_count = gvn->transform(new (C) SubINode(init_trip,limit));
    else
      ShouldNotReachHere();
    set_subtree_ctrl(trip_count);
    //_loop.map(trip_count->_idx,loop(limit));
    break;
  case BoolTest::le:            // Maybe convert to '<' case
    limit = gvn->transform(new (C) AddINode(limit,one_p));
    set_subtree_ctrl( limit );
    hook->init_req(4, limit);

    bt = BoolTest::lt;
    // Make the new limit be in the same loop nest as the old limit
    //_loop.map(limit->_idx,limit_loop);
    // Fall into next case
  case BoolTest::lt: {          // Maybe convert to '!=' case
    if (stride_con < 0) // Count down loop rolls through MAXINT
      ShouldNotReachHere();
    Node *range = gvn->transform(new (C) SubINode(limit,init_trip));
    set_subtree_ctrl( range );
    hook->init_req(0, range);

    Node *bias  = gvn->transform(new (C) AddINode(range,stride));
    set_subtree_ctrl( bias );
    hook->init_req(1, bias);

    Node *bias1 = gvn->transform(new (C) AddINode(bias,one_m));
    set_subtree_ctrl( bias1 );
    hook->init_req(2, bias1);

    trip_count  = gvn->transform(new (C) DivINode(0,bias1,stride));
    set_subtree_ctrl( trip_count );
    hook->init_req(3, trip_count);
    break;
  }

  case BoolTest::ge:            // Maybe convert to '>' case
    limit = gvn->transform(new (C) AddINode(limit,one_m));
    set_subtree_ctrl( limit );
    hook->init_req(4 ,limit);

    bt = BoolTest::gt;
    // Make the new limit be in the same loop nest as the old limit
    //_loop.map(limit->_idx,limit_loop);
    // Fall into next case
  case BoolTest::gt: {          // Maybe convert to '!=' case
    if (stride_con > 0) // count up loop rolls through MININT
      ShouldNotReachHere();
    Node *range = gvn->transform(new (C) SubINode(limit,init_trip));
    set_subtree_ctrl( range );
    hook->init_req(0, range);

    Node *bias  = gvn->transform(new (C) AddINode(range,stride));
    set_subtree_ctrl( bias );
    hook->init_req(1, bias);

    Node *bias1 = gvn->transform(new (C) AddINode(bias,one_p));
    set_subtree_ctrl( bias1 );
    hook->init_req(2, bias1);

    trip_count  = gvn->transform(new (C) DivINode(0,bias1,stride));
    set_subtree_ctrl( trip_count );
    hook->init_req(3, trip_count);
    break;
  }
  } // switch( bt )

  Node *span = gvn->transform(new (C) MulINode(trip_count,stride));
  set_subtree_ctrl( span );
  hook->init_req(5, span);

  limit = gvn->transform(new (C) AddINode(span,init_trip));
  set_subtree_ctrl( limit );
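
  // Worked example (added, illustrative): init_trip = 0, limit = 10,
  // stride = 3 under BoolTest::lt gives range = 10, bias = 13, bias1 = 12,
  // trip_count = 12/3 = 4, span = 4*3 = 12 and a canonical limit of
  // 0 + 12 = 12, so the canonical trip test exits exactly when the iv
  // reaches 12 after i = 0, 3, 6, 9.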

  } // LoopLimitCheck

  if (!UseCountedLoopSafepoints) {
    // Check for SafePoint on backedge and remove
    Node *sfpt = x->in(LoopNode::LoopBackControl);
    if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
      lazy_replace( sfpt, iftrue );
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt);
      }
      loop->_tail = iftrue;
    }
  }

  // Build a canonical trip test.
  // Clone code, as old values may be in use.
  incr = incr->clone();
  incr->set_req(1,phi);
  incr->set_req(2,stride);
  incr = _igvn.register_new_node_with_optimizer(incr);
  set_early_ctrl( incr );
  _igvn.hash_delete(phi);
  phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );

  // If phi type is more restrictive than Int, raise to
  // Int to prevent (almost) infinite recursion in igvn
  // which can only handle integer types for constants or minint..maxint.
  if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
    Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
    nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
    nphi = _igvn.register_new_node_with_optimizer(nphi);
    set_ctrl(nphi, get_ctrl(phi));
    _igvn.replace_node(phi, nphi);
    phi = nphi->as_Phi();
  }
  cmp = cmp->clone();
  cmp->set_req(1,incr);
  cmp->set_req(2,limit);
  cmp = _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, iff->in(0));

  test = test->clone()->as_Bool();
  (*(BoolTest*)&test->_test)._test = bt;
  test->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(test);
  set_ctrl(test, iff->in(0));

  // Replace the old IfNode with a new LoopEndNode
  Node *lex = _igvn.register_new_node_with_optimizer(new (C) CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt ));
  IfNode *le = lex->as_If();
  uint dd = dom_depth(iff);
  set_idom(le, le->in(0), dd); // Update dominance for loop exit
  set_loop(le, loop);

  // Get the loop-exit control
  Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));

  // Need to swap loop-exit and loop-back control?
  if (iftrue_op == Op_IfFalse) {
    Node *ift2=_igvn.register_new_node_with_optimizer(new (C) IfTrueNode (le));
    Node *iff2=_igvn.register_new_node_with_optimizer(new (C) IfFalseNode(le));

    loop->_tail = back_control = ift2;
    set_loop(ift2, loop);
    set_loop(iff2, get_loop(iffalse));

    // Lazy update of 'get_ctrl' mechanism.
    lazy_replace(iffalse, iff2);
    lazy_replace(iftrue,  ift2);

    // Swap names
    iffalse = iff2;
    iftrue  = ift2;
  } else {
    _igvn.hash_delete(iffalse);
    _igvn.hash_delete(iftrue);
    iffalse->set_req_X( 0, le, &_igvn );
    iftrue ->set_req_X( 0, le, &_igvn );
  }

  set_idom(iftrue,  le, dd+1);
  set_idom(iffalse, le, dd+1);
  assert(iff->outcnt() == 0, "should be dead now");
  lazy_replace( iff, le ); // fix 'get_ctrl'

  // Now setup a new CountedLoopNode to replace the existing LoopNode
  CountedLoopNode *l = new (C) CountedLoopNode(init_control, back_control);
  l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
  // The following assert is approximately true, and defines the intention
  // of can_be_counted_loop.  It fails, however, because phase->type
  // is not yet initialized for this loop and its parts.
  //assert(l->can_be_counted_loop(this), "sanity");
  _igvn.register_new_node_with_optimizer(l);
  set_loop(l, loop);
  loop->_head = l;
  // Fix all data nodes placed at the old loop head.
  // Uses the lazy-update mechanism of 'get_ctrl'.
  lazy_replace( x, l );
  set_idom(l, init_control, dom_depth(x));

  if (!UseCountedLoopSafepoints) {
    // Check for immediately preceding SafePoint and remove
    Node *sfpt2 = le->in(0);
    if (sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) {
      lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt2);
      }
    }
  }

  // Free up intermediate goo
  _igvn.remove_dead_node(hook);

#ifdef ASSERT
  assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
  assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" );
#endif
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Counted      ");
    loop->dump_head();
  }
#endif

  C->print_method(PHASE_AFTER_CLOOPS, 3);

  return true;
}

//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_valid_counted_loop(), "");

  if (!LoopLimitCheck || ABS(cl->stride_con()) == 1 ||
      cl->limit()->Opcode() == Op_LoopLimit) {
    // Without LoopLimitCheck the old code has an exact limit (which could be
    // incorrect in case of int overflow). The loop limit is exact when the
    // stride is +/-1. And the loop may already have an exact limit.
    return cl->limit();
  }
  Node *limit = NULL;
#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
#endif
  if (cl->has_exact_trip_count()) {
    // Simple case: loop has constant boundaries.
    // Use jlongs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong  init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    julong trip_cnt = cl->trip_count();
    jlong final_con = init_con + trip_cnt*stride_con;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    limit = _igvn.intcon(final_int);
  } else {
    // Create new LoopLimit node to get exact limit (final iv value).
    limit = new (C) LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
    register_new_node(limit, cl->in(LoopNode::EntryControl));
  }
  assert(limit != NULL, "sanity");
  return limit;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (!can_be_counted_loop(phase)) {
    phase->C->set_major_progress();
  }
  return RegionNode::Ideal(phase, can_reshape);
}


//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return RegionNode::Ideal(phase, can_reshape);
}

//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void CountedLoopNode::dump_spec(outputStream *st) const {
  LoopNode::dump_spec(st);
  if (stride_is_con()) {
    st->print("stride: %d ",stride_con());
  }
  if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
  if (is_main_loop()) st->print("main of N%d", _idx);
  if (is_post_loop()) st->print("post of N%d", _main_idx);
}
#endif

//=============================================================================
int CountedLoopEndNode::stride_con() const {
  return stride()->bottom_type()->is_int()->get_con();
}

//=============================================================================
//------------------------------Value-----------------------------------------
const Type *LoopLimitNode::Value( PhaseTransform *phase ) const {
  const Type* init_t   = phase->type(in(Init));
  const Type* limit_t  = phase->type(in(Limit));
  const Type* stride_t = phase->type(in(Stride));
  // Either input is TOP ==> the result is TOP
  if (init_t   == Type::TOP) return Type::TOP;
  if (limit_t  == Type::TOP) return Type::TOP;
  if (stride_t == Type::TOP) return Type::TOP;

  int stride_con = stride_t->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
    // Use jlongs to avoid integer overflow.
    jlong init_con   =  init_t->is_int()->get_con();
    jlong limit_con  = limit_t->is_int()->get_con();
    int  stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    jlong final_con  = init_con + stride_con*trip_count;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    return TypeInt::make(final_int);
  }
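  // Worked example (added, illustrative): init = 0, limit = 10, stride = 3
  // gives stride_m = 2, trip_count = (10 - 0 + 2)/3 = 4, and a final value
  // of 0 + 3*4 = 12: the first iv value at or beyond the limit.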

  return bottom_type(); // TypeInt::INT
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (phase->type(in(Init))   == Type::TOP ||
      phase->type(in(Limit))  == Type::TOP ||
      phase->type(in(Stride)) == Type::TOP)
    return NULL;  // Dead

  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (in(Init)->is_Con() && in(Limit)->is_Con())
    return NULL;  // Value

  // Delay following optimizations until all loop optimizations
  // done to keep Ideal graph simple.
  if (!can_reshape || phase->C->major_progress())
    return NULL;

  const TypeInt* init_t  = phase->type(in(Init) )->is_int();
  const TypeInt* limit_t = phase->type(in(Limit))->is_int();
  int stride_p;
  jlong lim, ini;
  julong max;
  if (stride_con > 0) {
    stride_p = stride_con;
    lim = limit_t->_hi;
    ini = init_t->_lo;
    max = (julong)max_jint;
  } else {
    stride_p = -stride_con;
    lim = init_t->_hi;
    ini = limit_t->_lo;
    max = (julong)min_jint;
  }
  julong range = lim - ini + stride_p;
  if (range <= max) {
    // Convert to an integer expression if it does not overflow.
    Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
    Node *range = phase->transform(new (phase->C) SubINode(in(Limit), in(Init)));
    Node *bias  = phase->transform(new (phase->C) AddINode(range, stride_m));
    Node *trip  = phase->transform(new (phase->C) DivINode(0, bias, in(Stride)));
    Node *span  = phase->transform(new (phase->C) MulINode(trip, in(Stride)));
    return new (phase->C) AddINode(span, in(Init)); // exact limit
  }

  if (is_power_of_2(stride_p) ||                // divisor is 2^n
      !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
    // Convert to long expression to avoid integer overflow
    // and let igvn optimizer convert this division.
    //
    Node*   init   = phase->transform( new (phase->C) ConvI2LNode(in(Init)));
    Node*  limit   = phase->transform( new (phase->C) ConvI2LNode(in(Limit)));
    Node* stride   = phase->longcon(stride_con);
    Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));

    Node *range = phase->transform(new (phase->C) SubLNode(limit, init));
    Node *bias  = phase->transform(new (phase->C) AddLNode(range, stride_m));
    Node *span;
    if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for the division. The zero trip guard
      // should guarantee that init < limit, but sometimes the guard is missing
      // and we can get a situation where init > limit. Note, for the empty
      // loop optimization the zero trip guard is generated explicitly, which
      // leaves only the RCE predicate where the exact limit is used, and the
      // predicate will simply fail, forcing recompilation.
      Node* neg_stride   = phase->longcon(-stride_con);
      span = phase->transform(new (phase->C) AndLNode(bias, neg_stride));
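      // Worked example (added, illustrative): stride = 4, bias = 13 gives
      // 13 & -4 == 12, i.e. bias rounded down to a multiple of the stride,
      // which equals trip_count * stride without an explicit division.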
    } else {
      Node *trip  = phase->transform(new (phase->C) DivLNode(0, bias, stride));
      span = phase->transform(new (phase->C) MulLNode(trip, stride));
    }
    // Convert back to int
    Node *span_int = phase->transform(new (phase->C) ConvL2INode(span));
    return new (phase->C) AddINode(span_int, in(Init)); // exact limit
  }

  return NULL;    // No progress
}

//------------------------------Identity---------------------------------------
// If stride == 1 return limit node.
Node *LoopLimitNode::Identity( PhaseTransform *phase ) {
  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1 || stride_con == -1)
    return in(Limit);
  return this;
}

//=============================================================================
//----------------------match_incr_with_optional_truncation--------------------
// Match increment with optional truncation:
// CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
// Return NULL for failure. Success returns the increment node.
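// Illustrative origin of the pattern (added comment, assuming typical javac
// output): in a loop like
//   for (short i = 0; i < n; i++) { ... }
// the increment is a plain int add followed by an i2s truncation, which the
// parser expands to ((i+1)<<16)>>16 -- one of the shapes matched below.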
Node* CountedLoopNode::match_incr_with_optional_truncation(
                      Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
  // Quick cutouts:
  if (expr == NULL || expr->req() != 3)  return NULL;

  Node *t1 = NULL;
  Node *t2 = NULL;
  const TypeInt* trunc_t = TypeInt::INT;
  Node* n1 = expr;
  int   n1op = n1->Opcode();

  // Try to strip (n1 & M) or (n1 << N >> N) from n1.
  if (n1op == Op_AndI &&
      n1->in(2)->is_Con() &&
      n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
    // %%% This check should match any mask of 2**K-1.
    t1 = n1;
    n1 = t1->in(1);
    n1op = n1->Opcode();
    trunc_t = TypeInt::CHAR;
  } else if (n1op == Op_RShiftI &&
             n1->in(1) != NULL &&
             n1->in(1)->Opcode() == Op_LShiftI &&
             n1->in(2) == n1->in(1)->in(2) &&
             n1->in(2)->is_Con()) {
    jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
    // %%% This check should match any shift in [1..31].
    if (shift == 16 || shift == 8) {
      t1 = n1;
      t2 = t1->in(1);
      n1 = t2->in(1);
      n1op = n1->Opcode();
      if (shift == 16) {
        trunc_t = TypeInt::SHORT;
      } else if (shift == 8) {
        trunc_t = TypeInt::BYTE;
      }
    }
  }

  // If (maybe after stripping) it is an AddI, we won:
  if (n1op == Op_AddI) {
    *trunc1 = t1;
    *trunc2 = t2;
    *trunc_type = trunc_t;
    return n1;
  }

  // failed
  return NULL;
}


//------------------------------filtered_type--------------------------------
// Return a type based on condition control flow
// A successful return will be a type that is restricted due
// to a series of dominating if-tests, such as:
//    if (i < 10) {
//       if (i > 0) {
//          here: "i" type is [1..10)
//       }
//    }
// or a control flow merge
//    if (i < 10) {
//       do {
//          phi( , ) -- at top of loop type is [min_int..10)
//         i = ?
//       } while ( i < 10)
//
const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) {
  assert(n && n->bottom_type()->is_int(), "must be int");
  const TypeInt* filtered_t = NULL;
  if (!n->is_Phi()) {
    assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control");
    filtered_t = filtered_type_from_dominators(n, n_ctrl);

  } else {
    Node* phi    = n->as_Phi();
    Node* region = phi->in(0);
    assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region");
    if (region && region != C->top()) {
      for (uint i = 1; i < phi->req(); i++) {
        Node* val   = phi->in(i);
        Node* use_c = region->in(i);
        const TypeInt* val_t = filtered_type_from_dominators(val, use_c);
        if (val_t != NULL) {
          if (filtered_t == NULL) {
            filtered_t = val_t;
          } else {
            filtered_t = filtered_t->meet(val_t)->is_int();
          }
        }
      }
    }
  }
  const TypeInt* n_t = _igvn.type(n)->is_int();
  if (filtered_t != NULL) {
    n_t = n_t->join(filtered_t)->is_int();
  }
  return n_t;
}


//------------------------------filtered_type_from_dominators--------------------------------
// Return a possibly more restrictive type for val based on condition control flow of dominators
const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) {
  if (val->is_Con()) {
     return val->bottom_type()->is_int();
  }
  uint if_limit = 10; // Max number of dominating if's visited
  const TypeInt* rtn_t = NULL;

  if (use_ctrl && use_ctrl != C->top()) {
    Node* val_ctrl = get_ctrl(val);
    uint val_dom_depth = dom_depth(val_ctrl);
    Node* pred = use_ctrl;
    uint if_cnt = 0;
    while (if_cnt < if_limit) {
      if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
        if_cnt++;
        const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
        if (if_t != NULL) {
          if (rtn_t == NULL) {
            rtn_t = if_t;
          } else {
            rtn_t = rtn_t->join(if_t)->is_int();
          }
        }
      }
      pred = idom(pred);
      if (pred == NULL || pred == C->top()) {
        break;
      }
      // Stop if going beyond definition block of val
      if (dom_depth(pred) < val_dom_depth) {
        break;
      }
    }
  }
  return rtn_t;
}


//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void CountedLoopEndNode::dump_spec(outputStream *st) const {
  if( in(TestValue)->is_Bool() ) {
    BoolTest bt( test_trip()); // Added this for g++.

    st->print("[");
    bt.dump_on(st);
    st->print("]");
  }
  st->print(" ");
  IfNode::dump_spec(st);
}
#endif

//=============================================================================
//------------------------------is_member--------------------------------------
// Is 'l' a member of 'this'?
int IdealLoopTree::is_member( const IdealLoopTree *l ) const {
  while( l->_nest > _nest ) l = l->_parent;
  return l == this;
}

//------------------------------set_nest---------------------------------------
// Set loop tree nesting depth.  Accumulate _has_call bits.
int IdealLoopTree::set_nest( uint depth ) {
  _nest = depth;
  int bits = _has_call;
  if( _child ) bits |= _child->set_nest(depth+1);
  if( bits ) _has_call = 1;
  if( _next  ) bits |= _next ->set_nest(depth  );
  return bits;
}

//------------------------------split_fall_in----------------------------------
// Split out multiple fall-in edges from the loop header.  Move them to a
// private RegionNode before the loop.  This becomes the loop landing pad.
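// Illustrative shape (added comment): a header Region with fall-in edges A
// and B plus backedge T becomes a private Region(A, B) -- the landing pad --
// feeding a two-input header Region(landing_pad, T); the loop Phis are split
// the same way below.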
void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
  PhaseIterGVN &igvn = phase->_igvn;
  uint i;

  // Make a new RegionNode to be the landing pad.
  Node *landing_pad = new (phase->C) RegionNode( fall_in_cnt+1 );
  phase->set_loop(landing_pad,_parent);
  // Gather all the fall-in control paths into the landing pad
  uint icnt = fall_in_cnt;
  uint oreq = _head->req();
  for( i = oreq-1; i>0; i-- )
    if( !phase->is_member( this, _head->in(i) ) )
      landing_pad->set_req(icnt--,_head->in(i));

  // Peel off PhiNode edges as well
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *oj = _head->fast_out(j);
    if( oj->is_Phi() ) {
      PhiNode* old_phi = oj->as_Phi();
      assert( old_phi->region() == _head, "" );
      igvn.hash_delete(old_phi);   // Yank from hash before hacking edges
      Node *p = PhiNode::make_blank(landing_pad, old_phi);
      uint icnt = fall_in_cnt;
      for( i = oreq-1; i>0; i-- ) {
        if( !phase->is_member( this, _head->in(i) ) ) {
          p->init_req(icnt--, old_phi->in(i));
          // Go ahead and clean out old edges from old phi
          old_phi->del_req(i);
        }
      }
      // Search for CSE's here, because ZKM.jar does a lot of
      // loop hackery and we need to be a little incremental
      // with the CSE to avoid O(N^2) node blow-up.
      Node *p2 = igvn.hash_find_insert(p); // Look for a CSE
      if( p2 ) {                // Found CSE
        p->destruct();          // Recover useless new node
        p = p2;                 // Use old node
      } else {
        igvn.register_new_node_with_optimizer(p, old_phi);
      }
      // Make old Phi refer to new Phi.
      old_phi->add_req(p);
      // Check for the special case of making the old phi useless and
      // disappear it.  In JavaGrande I have a case where this useless
      // Phi is the loop limit and prevents recognizing a CountedLoop
      // which in turn prevents removing an empty loop.
      Node *id_old_phi = old_phi->Identity( &igvn );
      if( id_old_phi != old_phi ) { // Found a simple identity?
        // Note that I cannot call 'replace_node' here, because
        // that will yank the edge from old_phi to the Region and
        // I'm mid-iteration over the Region's uses.
        for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
          Node* use = old_phi->last_out(i);
          igvn.rehash_node_delayed(use);
          uint uses_found = 0;
          for (uint j = 0; j < use->len(); j++) {
            if (use->in(j) == old_phi) {
              if (j < use->req()) use->set_req (j, id_old_phi);
              else                use->set_prec(j, id_old_phi);
              uses_found++;
            }
          }
          i -= uses_found;    // we deleted 1 or more copies of this edge
        }
      }
      igvn._worklist.push(old_phi);
    }
  }
  // Finally clean out the fall-in edges from the RegionNode
  for( i = oreq-1; i>0; i-- ) {
    if( !phase->is_member( this, _head->in(i) ) ) {
      _head->del_req(i);
    }
  }
  // Transform landing pad
  igvn.register_new_node_with_optimizer(landing_pad, _head);
  // Insert landing pad into the header
  _head->add_req(landing_pad);
}

//------------------------------split_outer_loop-------------------------------
// Split out the outermost loop from this shared header.
void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
  PhaseIterGVN &igvn = phase->_igvn;

  // Find index of outermost loop; it should also be my tail.
  uint outer_idx = 1;
  while( _head->in(outer_idx) != _tail ) outer_idx++;

  // Make a LoopNode for the outermost loop.
  Node *ctl = _head->in(LoopNode::EntryControl);
  Node *outer = new (phase->C) LoopNode( ctl, _head->in(outer_idx) );
  outer = igvn.register_new_node_with_optimizer(outer, _head);
  phase->set_created_loop_node();

  // Outermost loop falls into '_head' loop
  _head->set_req(LoopNode::EntryControl, outer);
  _head->del_req(outer_idx);
  // Split all the Phis up between '_head' loop and 'outer' loop.
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *out = _head->fast_out(j);
    if( out->is_Phi() ) {
      PhiNode *old_phi = out->as_Phi();
      assert( old_phi->region() == _head, "" );
      Node *phi = PhiNode::make_blank(outer, old_phi);
      phi->init_req(LoopNode::EntryControl,    old_phi->in(LoopNode::EntryControl));
      phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
      phi = igvn.register_new_node_with_optimizer(phi, old_phi);
      // Make old Phi point to new Phi on the fall-in path
      igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
      old_phi->del_req(outer_idx);
    }
  }

  // Use the new loop head instead of the old shared one
  _head = outer;
  phase->set_loop(_head, this);
}
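
// Hedged sketch of split_outer_loop (illustrative shapes, not from the
// source): with a shared header whose backedge at outer_idx is my tail,
//
//   before:  Region _head(entry, inner_backedge, outer_backedge)
//   after:   LoopNode outer(entry, outer_backedge)
//            Region   _head(outer, inner_backedge)
//
// Each Phi is split so a new phi on 'outer' merges the entry and
// outer-backedge values, while the old phi keeps the inner-backedge value
// and takes its fall-in from the new phi.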

//------------------------------fix_parent-------------------------------------
static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) {
  loop->_parent = parent;
  if( loop->_child ) fix_parent( loop->_child, loop   );
  if( loop->_next  ) fix_parent( loop->_next , parent );
}

//------------------------------estimate_path_freq-----------------------------
static float estimate_path_freq( Node *n ) {
  // Try to extract some path frequency info
  IfNode *iff;
  for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests
    uint nop = n->Opcode();
    if( nop == Op_SafePoint ) {   // Skip any safepoint
      n = n->in(0);
      continue;
    }
    if( nop == Op_CatchProj ) {   // Get count from a prior call
      // Assume call does not always throw exceptions: means the call-site
      // count is also the frequency of the fall-through path.
      assert( n->is_CatchProj(), "" );
      if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index )
        return 0.0f;            // Assume call exception path is rare
      Node *call = n->in(0)->in(0)->in(0);
      assert( call->is_Call(), "expect a call here" );
      const JVMState *jvms = ((CallNode*)call)->jvms();
      ciMethodData* methodData = jvms->method()->method_data();
      if (!methodData->is_mature())  return 0.0f; // No call-site data
      ciProfileData* data = methodData->bci_to_data(jvms->bci());
      if ((data == NULL) || !data->is_CounterData()) {
        // no call profile available, try call's control input
        n = n->in(0);
        continue;
      }
      return data->as_CounterData()->count()/FreqCountInvocations;
    }
    // See if there's a gating IF test
    Node *n_c = n->in(0);
    if( !n_c->is_If() ) break;       // No estimate available
    iff = n_c->as_If();
    if( iff->_fcnt != COUNT_UNKNOWN )   // Have a valid count?
      // Compute how much count comes on this path
      return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt;
    // Have no count info.  Skip dull uncommon-trap like branches.
    if( (nop == Op_IfTrue  && iff->_prob < PROB_LIKELY_MAG(5)) ||
        (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) )
      break;
    // Skip through never-taken branch; look for a real loop exit.
    n = iff->in(0);
  }
  return 0.0f;                  // No estimate available
}
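
// Worked example with assumed numbers: if the gating If was profiled with
// _fcnt = 1000.0 and _prob = 0.9, the IfTrue path is estimated at
// 0.9 * 1000 = 900 and the IfFalse path at (1.0 - 0.9) * 1000 = 100.
// For CatchProjs, the call-site profile count divided by
// FreqCountInvocations stands in for the fall-through frequency.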

//------------------------------merge_many_backedges---------------------------
// Merge all the backedges from the shared header into a private Region.
// Feed that region as the one backedge to this loop.
void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) {
  uint i;

  // Scan for the top 2 hottest backedges
  float hotcnt = 0.0f;
  float warmcnt = 0.0f;
  uint hot_idx = 0;
  // Loop starts at 2 because slot 1 is the fall-in path
  for( i = 2; i < _head->req(); i++ ) {
    float cnt = estimate_path_freq(_head->in(i));
    if( cnt > hotcnt ) {       // Grab hottest path
      warmcnt = hotcnt;
      hotcnt = cnt;
      hot_idx = i;
    } else if( cnt > warmcnt ) { // And 2nd hottest path
      warmcnt = cnt;
    }
  }

  // See if the hottest backedge is worthy of being an inner loop
  // by being much hotter than the next hottest backedge.
  if( hotcnt <= 0.0001 ||
      hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge
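  // For example (assumed counts): hotcnt = 900, warmcnt = 300 keeps
  // hot_idx since 900 >= 2.0*300; hotcnt = 500, warmcnt = 400 clears it
  // and all backedges get merged alike.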

  // Peel out the backedges into a private merge point; peel
  // them all except optionally hot_idx.
  PhaseIterGVN &igvn = phase->_igvn;

  Node *hot_tail = NULL;
  // Make a Region for the merge point
  Node *r = new (phase->C) RegionNode(1);
  for( i = 2; i < _head->req(); i++ ) {
    if( i != hot_idx )
      r->add_req( _head->in(i) );
    else hot_tail = _head->in(i);
  }
  igvn.register_new_node_with_optimizer(r, _head);
  // Plug region into end of loop _head, followed by hot_tail
  while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
  _head->set_req(2, r);
  if( hot_idx ) _head->add_req(hot_tail);

  // Split all the Phis up between '_head' loop and the Region 'r'
  for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) {
    Node *out = _head->fast_out(j);
    if( out->is_Phi() ) {
      PhiNode* n = out->as_Phi();
      igvn.hash_delete(n);      // Delete from hash before hacking edges
      Node *hot_phi = NULL;
      Node *phi = new (phase->C) PhiNode(r, n->type(), n->adr_type());
      // Check all inputs for the ones to peel out
      uint j = 1;
      for( uint i = 2; i < n->req(); i++ ) {
        if( i != hot_idx )
          phi->set_req( j++, n->in(i) );
        else hot_phi = n->in(i);
      }
      // Register the phi, but do not transform it until the whole region transforms
      igvn.register_new_node_with_optimizer(phi, n);
      // Add the merge phi to the old Phi
      while( n->req() > 3 ) n->del_req( n->req()-1 );
      n->set_req(2, phi);
      if( hot_idx ) n->add_req(hot_phi);
    }
  }


  // Insert a new IdealLoopTree below me.  Turn it into a clone of
  // the self loop tree.  Turn self into a loop headed by _head and with
  // tail being the new merge point.
  IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail );
  phase->set_loop(_tail,ilt);   // Adjust tail
  _tail = r;                    // Self's tail is new merge point
  phase->set_loop(r,this);
  ilt->_child = _child;         // New guy has my children
  _child = ilt;                 // Self has new guy as only child
  ilt->_parent = this;          // new guy has self for parent
  ilt->_nest = _nest;           // Same nesting depth (for now)

  // Starting with 'ilt', look for child loop trees using the same shared
  // header.  Flatten these out; they will no longer be loops in the end.
  IdealLoopTree **pilt = &_child;
  while( ilt ) {
    if( ilt->_head == _head ) {
      uint i;
      for( i = 2; i < _head->req(); i++ )
        if( _head->in(i) == ilt->_tail )
          break;                // Still a loop
      if( i == _head->req() ) { // No longer a loop
        // Flatten ilt.  Hang ilt's "_next" list from the end of
        // ilt's '_child' list.  Move the ilt's _child up to replace ilt.
        IdealLoopTree **cp = &ilt->_child;
        while( *cp ) cp = &(*cp)->_next;   // Find end of child list
        *cp = ilt->_next;       // Hang next list at end of child list
        *pilt = ilt->_child;    // Move child up to replace ilt
        ilt->_head = NULL;      // Flag as a loop UNIONED into parent
        ilt = ilt->_child;      // Repeat using new ilt
        continue;               // do not advance over ilt->_child
      }
      assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" );
      phase->set_loop(_head,ilt);
    }
    pilt = &ilt->_child;        // Advance to next
    ilt = *pilt;
  }

  if( _child ) fix_parent( _child, this );
}
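
// Hedged before/after sketch of merge_many_backedges (illustrative
// shapes, not from the source):
//
//   before:  Region _head(fall_in, back_1, back_2, back_hot)
//   after:   Region r(back_1, back_2)
//            Region _head(fall_in, r, back_hot)
//
// The cold backedges now reach the header only through the private merge
// point r, which becomes self's new tail, while the hot backedge (if any)
// survives as a separate edge owned by the new inner loop tree 'ilt' that
// split_outer_loop can peel later.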

//------------------------------beautify_loops---------------------------------
// Split shared headers and insert loop landing pads.
// Insert a LoopNode to replace the RegionNode.
// Return TRUE if loop tree is structurally changed.
bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
  bool result = false;
  // Cache parts in locals for easy access
  PhaseIterGVN &igvn = phase->_igvn;

  igvn.hash_delete(_head);      // Yank from hash before hacking edges

  // Check for multiple fall-in paths.  Peel off a landing pad if need be.
  int fall_in_cnt = 0;
  for( uint i = 1; i < _head->req(); i++ )
    if( !phase->is_member( this, _head->in(i) ) )
      fall_in_cnt++;
  assert( fall_in_cnt, "at least 1 fall-in path" );
  if( fall_in_cnt > 1 )         // Need a loop landing pad to merge fall-ins
    split_fall_in( phase, fall_in_cnt );

  // Swap inputs to the _head and all Phis to move the fall-in edge to
  // the left.
  fall_in_cnt = 1;
  while( phase->is_member( this, _head->in(fall_in_cnt) ) )
    fall_in_cnt++;
  if( fall_in_cnt > 1 ) {
    // Since I am just swapping inputs I do not need to update def-use info
    Node *tmp = _head->in(1);
    _head->set_req( 1, _head->in(fall_in_cnt) );
    _head->set_req( fall_in_cnt, tmp );
    // Swap also all Phis
    for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
      Node* phi = _head->fast_out(i);
      if( phi->is_Phi() ) {
        igvn.hash_delete(phi); // Yank from hash before hacking edges
        tmp = phi->in(1);
        phi->set_req( 1, phi->in(fall_in_cnt) );
        phi->set_req( fall_in_cnt, tmp );
      }
    }
  }
  assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
  assert(  phase->is_member( this, _head->in(2) ), "right edge is loop" );

  // If I am a shared header (multiple backedges), peel off the many
  // backedges into a private merge point and use the merge point as
  // the one true backedge.
  if (_head->req() > 3) {
    // Merge the many backedges into a single backedge but leave
    // the hottest backedge as separate edge for the following peel.
    if (!_irreducible) {
      merge_many_backedges( phase );
    }

    // When recursively beautifying my children, split_fall_in can change
    // the loop tree structure when I am an irreducible loop. Then the head
    // of my children may have a req() no bigger than 3. Here we need to set
    // result to true to catch that case in order to tell the caller to
    // rebuild the loop tree. See issue JDK-8244407 for details.
    result = true;
  }

  // If I have one hot backedge, peel off the outermost loop.
  // I had better be the outermost loop.
  if (_head->req() > 3 && !_irreducible) {
    split_outer_loop( phase );
    result = true;

  } else if (!_head->is_Loop() && !_irreducible) {
    // Make a new LoopNode to replace the old loop head
    Node *l = new (phase->C) LoopNode( _head->in(1), _head->in(2) );
    l = igvn.register_new_node_with_optimizer(l, _head);
    phase->set_created_loop_node();
    // Go ahead and replace _head
    phase->_igvn.replace_node( _head, l );
    _head = l;
    phase->set_loop(_head, this);
  }

  // Now recursively beautify nested loops
  if( _child ) result |= _child->beautify_loops( phase );
  if( _next  ) result |= _next ->beautify_loops( phase );
  return result;
}
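
// Net effect of beautify_loops: every reducible loop head ends up in
// canonical form, a LoopNode whose in(1) (EntryControl) is the single
// fall-in path and whose in(2) (LoopBackControl) is the single backedge,
// which is exactly what the asserts above check edge by edge.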

//------------------------------allpaths_check_safepts----------------------------
// Allpaths backwards scan from the loop tail, terminating each path at the
// first safepoint encountered.  Helper for check_safepts.
void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
  assert(stack.size() == 0, "empty stack");
  stack.push(_tail);
  visited.Clear();
  visited.set(_tail->_idx);
  while (stack.size() > 0) {
    Node* n = stack.pop();
    if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
      // Terminate this path
    } else if (n->Opcode() == Op_SafePoint) {
      if (_phase->get_loop(n) != this) {
        if (_required_safept == NULL) _required_safept = new Node_List();
        _required_safept->push(n);  // save the one closest to the tail
      }
      // Terminate this path
    } else {
      uint start = n->is_Region() ? 1 : 0;
      uint end   = n->is_Region() && !n->is_Loop() ? n->req() : start + 1;
      for (uint i = start; i < end; i++) {
        Node* in = n->in(i);
        assert(in->is_CFG(), "must be");
        if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
          stack.push(in);
        }
      }
    }
  }
}

//------------------------------check_safepts----------------------------
// Given dominators, try to find loops with calls that must always be
// executed (call dominates loop tail).  These loops do not need non-call
// safepoints (ncsfpt).
//
// A complication is that a safepoint in an inner loop may be needed
// by an outer loop. In the following, the inner loop sees it has a
// call (block 3) on every path from the head (block 2) to the
// backedge (arc 3->2).  So it deletes the ncsfpt (non-call safepoint)
// in block 2, _but_ this leaves the outer loop without a safepoint.
//
//          entry  0
//                 |
//                 v
// outer 1,2    +->1
//              |  |
//              |  v
//              |  2<---+  ncsfpt in 2
//              |_/|\   |
//                 | v  |
// inner 2,3      /  3  |  call in 3
//               /   |  |
//              v    +--+
//        exit  4
//
//
// For each loop, this method creates a list (_required_safept) of ncsfpt
// nodes that must be preserved. When an ncsfpt may be deleted, it is
// first looked for in the lists for the outer loops of the current loop.
//
// The insights into the problem:
//  A) counted loops are okay
//  B) innermost loops are okay (only an inner loop can delete
//     a ncsfpt needed by an outer loop)
//  C) a loop is immune from an inner loop deleting a safepoint
//     if the loop has a call on the idom-path
//  D) a loop is also immune if it has a ncsfpt (non-call safepoint) on the
//     idom-path that is not in a nested loop
//  E) otherwise, an ncsfpt on the idom-path that is nested in an inner
//     loop needs to be prevented from deletion by an inner loop
//
// There are two analyses:
//  1) The first, and cheaper one, scans the loop body from
//     tail to head following the idom (immediate dominator)
//     chain, looking for the cases (C,D,E) above.
//     Since inner loops are scanned before outer loops, there is summary
//     information about inner loops.  Inner loops can be skipped over
//     when the tail of an inner loop is encountered.
//
//  2) The second, invoked if the first fails to find a call or ncsfpt on
//     the idom path (which is rare), scans all predecessor control paths
//     from the tail to the head, terminating a path when a call or sfpt
//     is encountered, to find the ncsfpt's that are closest to the tail.
//
void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) {
  // Bottom up traversal
  if (_child) _child->check_safepts(visited, stack);
  if (_next)  _next ->check_safepts(visited, stack);

  if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) {
    bool  has_call         = false; // call on dom-path
    bool  has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth
    Node* nonlocal_ncsfpt  = NULL;  // ncsfpt on dom-path at a deeper depth
    // Scan the dom-path nodes from tail to head
    for (Node* n = tail(); n != _head; n = _phase->idom(n)) {
      if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
        has_call = true;
        _has_sfpt = 1;          // Then no need for a safept!
        break;
      } else if (n->Opcode() == Op_SafePoint) {
        if (_phase->get_loop(n) == this) {
          has_local_ncsfpt = true;
          break;
        }
        if (nonlocal_ncsfpt == NULL) {
          nonlocal_ncsfpt = n; // save the one closest to the tail
        }
      } else {
        IdealLoopTree* nlpt = _phase->get_loop(n);
        if (this != nlpt) {
          // If at an inner loop tail, see if the inner loop has already
          // recorded seeing a call on the dom-path (and stop.)  If not,
          // jump to the head of the inner loop.
          assert(is_member(nlpt), "nested loop");
          Node* tail = nlpt->_tail;
          if (tail->in(0)->is_If()) tail = tail->in(0);
          if (n == tail) {
            // If inner loop has call on dom-path, so does outer loop
            if (nlpt->_has_sfpt) {
              has_call = true;
              _has_sfpt = 1;
              break;
            }
            // Skip to head of inner loop
            assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head");
            n = nlpt->_head;
          }
        }
      }
    }
    // Record safepoints that this loop needs preserved when an
    // inner loop attempts to delete its safepoints.
    if (_child != NULL && !has_call && !has_local_ncsfpt) {
      if (nonlocal_ncsfpt != NULL) {
        if (_required_safept == NULL) _required_safept = new Node_List();
        _required_safept->push(nonlocal_ncsfpt);
      } else {
        // Failed to find a suitable safept on the dom-path.  Now use
        // an all paths walk from tail to head, looking for safepoints to preserve.
        allpaths_check_safepts(visited, stack);
      }
    }
  }
}

//---------------------------is_deleteable_safept----------------------------
// Is safept not required by an outer loop?
bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) {
  assert(sfpt->Opcode() == Op_SafePoint, "");
  IdealLoopTree* lp = get_loop(sfpt)->_parent;
  while (lp != NULL) {
    Node_List* sfpts = lp->_required_safept;
    if (sfpts != NULL) {
      for (uint i = 0; i < sfpts->size(); i++) {
        if (sfpt == sfpts->at(i))
          return false;
      }
    }
    lp = lp->_parent;
  }
  return true;
}

//---------------------------replace_parallel_iv-------------------------------
// Replace parallel induction variable (parallel to trip counter)
void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  if (!cl->is_valid_counted_loop())
    return;         // skip malformed counted loop
  Node *incr = cl->incr();
  if (incr == NULL)
    return;         // Dead loop?
  Node *init = cl->init_trip();
  Node *phi  = cl->phi();
  int stride_con = cl->stride_con();

  // Visit all children, looking for Phis
  for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
    Node *out = cl->out(i);
    // Look for other phis (secondary IVs). Skip dead ones
    if (!out->is_Phi() || out == phi || !has_node(out))
      continue;
    PhiNode* phi2 = out->as_Phi();
    Node *incr2 = phi2->in( LoopNode::LoopBackControl );
    // Look for induction variables of the form:  X += constant
    if (phi2->region() != loop->_head ||
        incr2->req() != 3 ||
        incr2->in(1) != phi2 ||
        incr2 == incr ||
        incr2->Opcode() != Op_AddI ||
        !incr2->in(2)->is_Con())
      continue;

    // Check for parallel induction variable (parallel to trip counter)
    // via an affine function.  In particular, count-down loops with
    // count-up array indices are common. We only RCE references off
    // the trip-counter, so we need to convert all these to trip-counter
    // expressions.
    Node *init2 = phi2->in( LoopNode::EntryControl );
    int stride_con2 = incr2->in(2)->get_int();

    // The ratio of the two strides cannot be represented as an int
    // if stride_con2 is min_int and stride_con is -1.
    if (stride_con2 == min_jint && stride_con == -1) {
      continue;
    }

    // The general case here gets a little tricky.  We want to find the
    // GCD of all possible parallel IV's and make a new IV using this
    // GCD for the loop.  Then all possible IVs are simple multiples of
    // the GCD.  In practice, this will cover very few extra loops.
    // Instead we require 'stride_con2' to be a multiple of 'stride_con',
    // where +/-1 is the common case, but other integer multiples are
    // also easy to handle.
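    // For example (assumed strides): stride_con = 2 and stride_con2 = -6
    // give ratio_con = -3, and -3 * 2 == -6 passes the exactness check
    // below; stride_con = 2 and stride_con2 = 5 give ratio_con = 2
    // (truncating division), and 2 * 2 != 5 fails it, so that phi is
    // left alone.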
    int ratio_con = stride_con2/stride_con;

    if ((ratio_con * stride_con) == stride_con2) { // Check for exact
#ifndef PRODUCT
      if (TraceLoopOpts) {
        tty->print("Parallel IV: %d ", phi2->_idx);
        loop->dump_head();
      }
#endif
      // Convert to using the trip counter.  The parallel induction
      // variable differs from the trip counter by a loop-invariant
      // amount, the difference between their respective initial values.
      // It is scaled by the 'ratio_con'.
      Node* ratio = _igvn.intcon(ratio_con);
      set_ctrl(ratio, C->root());
      Node* ratio_init = new (C) MulINode(init, ratio);
      _igvn.register_new_node_with_optimizer(ratio_init, init);
      set_early_ctrl(ratio_init);
      Node* diff = new (C) SubINode(init2, ratio_init);
      _igvn.register_new_node_with_optimizer(diff, init2);
      set_early_ctrl(diff);
      Node* ratio_idx = new (C) MulINode(phi, ratio);
      _igvn.register_new_node_with_optimizer(ratio_idx, phi);
      set_ctrl(ratio_idx, cl);
      Node* add = new (C) AddINode(ratio_idx, diff);
      _igvn.register_new_node_with_optimizer(add);
      set_ctrl(add, cl);
      _igvn.replace_node( phi2, add );
      // Sometimes an induction variable is unused
      if (add->outcnt() == 0) {
        _igvn.remove_dead_node(add);
      }
      --i; // deleted this phi; rescan starting with next position
      continue;
    }
  }
}
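
// Worked example of the rewrite above (assumed values): with the trip
// counter i starting at init = 0 with stride +1 and a second IV j
// starting at init2 = 10 with stride +4, ratio_con = 4 and
//
//   j == 4*i + (init2 - 4*init) == 4*i + 10
//
// so phi2 is replaced by AddI(MulI(phi, ratio), diff), making j an affine
// function of the trip counter that later range-check elimination can
// reason about.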

void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) {
  Node* keep = NULL;
  if (keep_one) {
    // Look for a safepoint on the idom-path.
    for (Node* i = tail(); i != _head; i = phase->idom(i)) {
      if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) {
        keep = i;
        break; // Found one
      }
    }
  }

  // Don't remove any safepoints if it is requested to keep a single safepoint and
  // no safepoint was found on the idom-path. It is not safe to remove any safepoint
  // in this case since there's no safepoint dominating all paths in the loop body.
  bool prune = !keep_one || keep != NULL;

  // Delete other safepoints in this loop.
  Node_List* sfpts = _safepts;
  if (prune && sfpts != NULL) {
    assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint");
    for (uint i = 0; i < sfpts->size(); i++) {
      Node* n = sfpts->at(i);
      assert(phase->get_loop(n) == this, "");
      if (n != keep && phase->is_deleteable_safept(n)) {
        phase->lazy_replace(n, n->in(TypeFunc::Control));
      }
    }
  }
}

//------------------------------counted_loop-----------------------------------
// Convert to counted loops where possible
void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {

  // For grins, set the inner-loop flag here
  if (!_child) {
    if (_head->is_Loop()) _head->as_Loop()->set_inner_loop();
  }

  if (_head->is_CountedLoop() ||
      phase->is_counted_loop(_head, this)) {

    if (!UseCountedLoopSafepoints) {
      // Indicate we do not need a safepoint here
      _has_sfpt = 1;
    }

    // Remove safepoints
    bool keep_one_sfpt = !(_has_call || _has_sfpt);
    remove_safepoints(phase, keep_one_sfpt);

    // Look for induction variables
    phase->replace_parallel_iv(this);

  } else if (_parent != NULL && !_irreducible) {
    // Not a counted loop. Keep one safepoint.
    bool keep_one_sfpt = true;
    remove_safepoints(phase, keep_one_sfpt);
  }

  // Recursively
  if (_child) _child->counted_loop( phase );
  if (_next)  _next ->counted_loop( phase );
}

#ifndef PRODUCT
//------------------------------dump_head--------------------------------------
// Dump 1 liner for loop header info
void IdealLoopTree::dump_head( ) const {
  for (uint i=0; i<_nest; i++)
    tty->print("  ");
  tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx);
  if (_irreducible) tty->print(" IRREDUCIBLE");
  Node* entry = _head->in(LoopNode::EntryControl);
  if (LoopLimitCheck) {
    Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
    if (predicate != NULL ) {
      tty->print(" limit_check");
      entry = entry->in(0)->in(0);
    }
  }
  if (UseLoopPredicate) {
    entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
    if (entry != NULL) {
      tty->print(" predicated");
    }
  }
  if (_head->is_CountedLoop()) {
    CountedLoopNode *cl = _head->as_CountedLoop();
    tty->print(" counted");

    Node* init_n = cl->init_trip();
    if (init_n  != NULL &&  init_n->is_Con())
      tty->print(" [%d,", cl->init_trip()->get_int());
    else
      tty->print(" [int,");
    Node* limit_n = cl->limit();
    if (limit_n  != NULL &&  limit_n->is_Con())
      tty->print("%d),", cl->limit()->get_int());
    else
      tty->print("int),");
    int stride_con  = cl->stride_con();
    if (stride_con > 0) tty->print("+");
    tty->print("%d", stride_con);

    tty->print(" (%d iters) ", (int)cl->profile_trip_cnt());

    if (cl->is_pre_loop ()) tty->print(" pre" );
    if (cl->is_main_loop()) tty->print(" main");
    if (cl->is_post_loop()) tty->print(" post");
  }
  if (_has_call) tty->print(" has_call");
  if (_has_sfpt) tty->print(" has_sfpt");
  if (_rce_candidate) tty->print(" rce");
  if (_safepts != NULL && _safepts->size() > 0) {
    tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }");
  }
  if (_required_safept != NULL && _required_safept->size() > 0) {
    tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }");
  }
  tty->cr();
}

//------------------------------dump-------------------------------------------
// Dump loops by loop tree
void IdealLoopTree::dump( ) const {
  dump_head();
  if (_child) _child->dump();
  if (_next)  _next ->dump();
}

#endif

static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
  if (loop == root) {
    if (loop->_child != NULL) {
      log->begin_head("loop_tree");
      log->end_head();
      if( loop->_child ) log_loop_tree(root, loop->_child, log);
      log->tail("loop_tree");
      assert(loop->_next == NULL, "what?");
    }
  } else {
    Node* head = loop->_head;
    log->begin_head("loop");
    log->print(" idx='%d' ", head->_idx);
    if (loop->_irreducible) log->print("irreducible='1' ");
    if (head->is_Loop()) {
      if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' ");
      if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' ");
    }
    if (head->is_CountedLoop()) {
      CountedLoopNode* cl = head->as_CountedLoop();
      if (cl->is_pre_loop())  log->print("pre_loop='%d' ",  cl->main_idx());
      if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx);
      if (cl->is_post_loop()) log->print("post_loop='%d' ",  cl->main_idx());
    }
    log->end_head();
    if( loop->_child ) log_loop_tree(root, loop->_child, log);
    log->tail("loop");
    if( loop->_next  ) log_loop_tree(root, loop->_next, log);
  }
}

//---------------------collect_potentially_useful_predicates-----------------------
// Helper function to collect potentially useful predicates to prevent them from
// being eliminated by PhaseIdealLoop::eliminate_useless_predicates
void PhaseIdealLoop::collect_potentially_useful_predicates(
                         IdealLoopTree * loop, Unique_Node_List &useful_predicates) {
  if (loop->_child) { // child
    collect_potentially_useful_predicates(loop->_child, useful_predicates);
  }

  // self (only loops to which we can apply loop predication may use their predicates)
  if (loop->_head->is_Loop() &&
      !loop->_irreducible    &&
      !loop->tail()->is_top()) {
    LoopNode* lpn = loop->_head->as_Loop();
    Node* entry = lpn->in(LoopNode::EntryControl);
    Node* predicate_proj = find_predicate(entry); // loop_limit_check first
    if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
      assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
      useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
      entry = entry->in(0)->in(0);
    }
    predicate_proj = find_predicate(entry); // Predicate
    if (predicate_proj != NULL ) {
      useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
    }
  }

  if (loop->_next) { // sibling
    collect_potentially_useful_predicates(loop->_next, useful_predicates);
  }
}

//------------------------eliminate_useless_predicates-----------------------------
// Eliminate all inserted predicates if they could not be used by loop predication.
// Note: this also eliminates the loop limit check predicate, since it also
// uses an Opaque1 node (see Parse::add_predicate()).
void PhaseIdealLoop::eliminate_useless_predicates() {
  if (C->predicate_count() == 0)
    return; // no predicate left

  Unique_Node_List useful_predicates; // to store useful predicates
  if (C->has_loops()) {
    collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates);
  }

  for (int i = C->predicate_count(); i > 0; i--) {
     Node * n = C->predicate_opaque1_node(i-1);
     assert(n->Opcode() == Op_Opaque1, "must be");
     if (!useful_predicates.member(n)) { // not in the useful list
       _igvn.replace_node(n, n->in(1));
     }
  }
}
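
// Hedged sketch of the predicate shape relied on above: the loop entry is
// a projection of an If whose condition chains through an Opaque1 node
// wrapping a constant, i.e. entry->in(0)->in(1)->in(1) is the Opaque1
// (see the assert in collect_potentially_useful_predicates). Replacing a
// useless Opaque1 with its input lets igvn constant-fold the guarding If
// and delete the predicate path.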

//------------------------process_expensive_nodes-----------------------------
// Expensive nodes have their control input set to prevent the GVN
// from commoning them and, as a result, forcing the resulting node
// onto a more frequent path. Use CFG information here to change the
// control inputs so that some expensive nodes can be commoned while
// not being executed more frequently.
bool PhaseIdealLoop::process_expensive_nodes() {
  assert(OptimizeExpensiveOps, "optimization off?");

  // Sort nodes to bring similar nodes together
  C->sort_expensive_nodes();

  bool progress = false;

  for (int i = 0; i < C->expensive_count(); ) {
    Node* n = C->expensive_node(i);
    int start = i;
    // Find nodes similar to n
    i++;
    for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
    int end = i;
    // And compare them two by two
    for (int j = start; j < end; j++) {
      Node* n1 = C->expensive_node(j);
      if (is_node_unreachable(n1)) {
        continue;
      }
      for (int k = j+1; k < end; k++) {
        Node* n2 = C->expensive_node(k);
        if (is_node_unreachable(n2)) {
          continue;
        }

        assert(n1 != n2, "should be pair of nodes");

        Node* c1 = n1->in(0);
        Node* c2 = n2->in(0);

        Node* parent_c1 = c1;
        Node* parent_c2 = c2;

        // The call to get_early_ctrl_for_expensive() moves the
        // expensive nodes up but stops at loops that are in an if
        // branch. See whether we can exit the loop and move above the
        // If.
        if (c1->is_Loop()) {
          parent_c1 = c1->in(1);
        }
        if (c2->is_Loop()) {
          parent_c2 = c2->in(1);
        }

        if (parent_c1 == parent_c2) {
          _igvn._worklist.push(n1);
          _igvn._worklist.push(n2);
          continue;
        }

        // Look for identical expensive node up the dominator chain.
        if (is_dominator(c1, c2)) {
          c2 = c1;
        } else if (is_dominator(c2, c1)) {
          c1 = c2;
        } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
                   parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
          // Both branches have the same expensive node so move it up
          // before the if.
          c1 = c2 = idom(parent_c1->in(0));
        }
        // Do the actual moves
        if (n1->in(0) != c1) {
          _igvn.hash_delete(n1);
          n1->set_req(0, c1);
          _igvn.hash_insert(n1);
          _igvn._worklist.push(n1);
          progress = true;
        }
        if (n2->in(0) != c2) {
          _igvn.hash_delete(n2);
          n2->set_req(0, c2);
          _igvn.hash_insert(n2);
          _igvn._worklist.push(n2);
          progress = true;
        }
      }
    }
  }

  return progress;
}
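
// Illustrative scenario (assumed shapes, not from the source): two
// identical expensive nodes pinned in the two arms of the same If,
//
//   If -> IfTrue  -> ... -> n1 (control c1)
//      -> IfFalse -> ... -> n2 (control c2)
//
// both get the If's idom as their control input, after which GVN can
// common n1 and n2 into a single node that is executed no more often
// than the If itself.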


//=============================================================================
//----------------------------build_and_optimize-------------------------------
// Create a PhaseIdealLoop.  Build the ideal Loop tree.  Map each Ideal Node to
// its corresponding LoopNode.  Unless loop optimizations are skipped, do some
// loop cleanups as well.
void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts) {
  ResourceMark rm;

  int old_progress = C->major_progress();
  uint orig_worklist_size = _igvn._worklist.size();

  // Reset major-progress flag for the driver's heuristics
  C->clear_major_progress();

#ifndef PRODUCT
  // Capture for later assert
  uint unique = C->unique();
  _loop_invokes++;
  _loop_work += unique;
#endif

  // True if the method has at least 1 irreducible loop
  _has_irreducible_loops = false;

  _created_loop_node = false;

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  // Pre-grow the mapping from Nodes to IdealLoopTrees.
  _nodes.map(C->unique(), NULL);
  memset(_nodes.adr(), 0, wordSize * C->unique());

  // Pre-build the top-level outermost loop tree entry
  _ltree_root = new IdealLoopTree( this, C->root(), C->root() );
  // Do not need a safepoint at the top level
  _ltree_root->_has_sfpt = 1;

  // Initialize Dominators.
  // Checked in clone_loop_predicate() during beautify_loops().
  _idom_size = 0;
  _idom      = NULL;
  _dom_depth = NULL;
  _dom_stk   = NULL;

  // Empty pre-order array
  allocate_preorders();

  // Build a loop tree on the fly.  Build a mapping from CFG nodes to
  // IdealLoopTree entries.  Data nodes are NOT walked.
  build_loop_tree();
  // Check for bailout, and return
  if (C->failing()) {
    return;
  }

  // No loops after all
  if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);

  // There should always be an outer loop containing the Root and Return nodes.
  // If not, we have a degenerate empty program.  Bail out in this case.
  if (!has_node(C->root())) {
    if (!_verify_only) {
      C->clear_major_progress();
      C->record_method_not_compilable("empty program detected during loop optimization");
    }
    return;
  }

  // Nothing to do, so get out
  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;
  bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
  if (stop_early && !do_expensive_nodes) {
    _igvn.optimize();           // Cleanup NeverBranches
    return;
  }

  // Set loop nesting depth
  _ltree_root->set_nest( 0 );

  // Split shared headers and insert loop landing pads.
  // Do not bother doing this on the Root loop of course.
  if( !_verify_me && !_verify_only && _ltree_root->_child ) {
    C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
    if( _ltree_root->_child->beautify_loops( this ) ) {
      // Re-build loop tree!
      _ltree_root->_child = NULL;
      _nodes.clear();
      reallocate_preorders();
      build_loop_tree();
      // Check for bailout, and return
      if (C->failing()) {
        return;
      }
      // Reset loop nesting depth
      _ltree_root->set_nest( 0 );

      C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
    }
  }

  // Build Dominators for elision of NULL checks & loop finding.
  // Since nodes do not have a slot for immediate dominator, make
  // a persistent side array for that info indexed on node->_idx.
  _idom_size = C->unique();
  _idom      = NEW_RESOURCE_ARRAY( Node*, _idom_size );
  _dom_depth = NEW_RESOURCE_ARRAY( uint,  _idom_size );
  _dom_stk   = NULL; // Allocated on demand in recompute_dom_depth
  memset( _dom_depth, 0, _idom_size * sizeof(uint) );

  Dominators();

  if (!_verify_only) {
    // As a side effect, Dominators removed any unreachable CFG paths
    // into RegionNodes.  It doesn't do this test against Root, so
    // we do it here.
    for( uint i = 1; i < C->root()->req(); i++ ) {
      if( !_nodes[C->root()->in(i)->_idx] ) {    // Dead path into Root?
        _igvn.delete_input_of(C->root(), i);
        i--;                      // Rerun same iteration on compressed edges
      }
    }

    // Given dominators, try to find inner loops with calls that must
    // always be executed (call dominates loop tail).  These loops do
    // not need a separate safepoint.
    Node_List cisstack(a);
    _ltree_root->check_safepts(visited, cisstack);
  }

  // Walk the DATA nodes and place into loops.  Find earliest control
  // node.  For CFG nodes, the _nodes array starts out and remains
  // holding the associated IdealLoopTree pointer.  For DATA nodes, the
  // _nodes array holds the earliest legal controlling CFG node.

  // Allocate stack with enough space to avoid frequent realloc
  int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
  Node_Stack nstack( a, stack_size );

  visited.Clear();
  Node_List worklist(a);
  // Don't need C->root() on worklist since
  // it will be processed among C->top() inputs
  worklist.push( C->top() );
  visited.set( C->top()->_idx ); // Set C->top() as visited now
  build_loop_early( visited, worklist, nstack );

  // Given early legal placement, try finding counted loops.  This placement
  // is good enough to discover most loop invariants.
  if( !_verify_me && !_verify_only )
    _ltree_root->counted_loop( this );

  // Find latest loop placement.  Find ideal loop placement.
  visited.Clear();
  init_dom_lca_tags();
  // Need C->root() on worklist when processing outs
  worklist.push( C->root() );
  NOT_PRODUCT( C->verify_graph_edges(); )
  worklist.push( C->top() );
  build_loop_late( visited, worklist, nstack );

  if (_verify_only) {
    // restore major progress flag
    for (int i = 0; i < old_progress; i++)
      C->set_major_progress();
    assert(C->unique() == unique, "verification mode made Nodes? ? ?");
    assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
    return;
  }

  // clear out the dead code after build_loop_late
  while (_deadlist.size()) {
    _igvn.remove_globally_dead_node(_deadlist.pop());
  }

  if (stop_early) {
    assert(do_expensive_nodes, "why are we here?");
    if (process_expensive_nodes()) {
      // If we made some progress when processing expensive nodes then
      // the IGVN may modify the graph in a way that will allow us to
      // make some more progress: we need to try processing expensive
      // nodes again.
      C->set_major_progress();
    }
    _igvn.optimize();
    return;
  }

  // Some parser-inserted loop predicates could never be used by loop
  // predication, or they were moved away from the loop during some
  // optimizations, for example peeling. Eliminate them before the next
  // loop optimizations.
  if (UseLoopPredicate || LoopLimitCheck) {
    eliminate_useless_predicates();
  }

#ifndef PRODUCT
  C->verify_graph_edges();
  if (_verify_me) {             // Nested verify pass?
    // Check to see if the verify mode is broken
    assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?");
    return;
  }
  if(VerifyLoopOptimizations) verify();
  if(TraceLoopOpts && C->has_loops()) {
    _ltree_root->dump();
  }
#endif

  if (skip_loop_opts) {
    // restore major progress flag
    for (int i = 0; i < old_progress; i++) {
      C->set_major_progress();
    }

    // Cleanup any modified bits
    _igvn.optimize();

    if (C->log() != NULL) {
      log_loop_tree(_ltree_root, _ltree_root, C->log());
    }
    return;
  }

  if (ReassociateInvariants) {
    // Reassociate invariants and prep for split_thru_phi
    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
      IdealLoopTree* lpt = iter.current();
      if (!lpt->is_counted() || !lpt->is_inner()) continue;

      lpt->reassociate_invariants(this);

      // Because RCE opportunities can be masked by split_thru_phi,
      // look for RCE candidates and inhibit split_thru_phi
      // on just their loop-phi's for this pass of loop opts
      if (SplitIfBlocks && do_split_ifs) {
        if (lpt->policy_range_check(this)) {
          lpt->_rce_candidate = 1; // = true
        }
      }
    }
  }

  // Check for aggressive application of split-if and other transforms
  // that require basic-block info (like cloning through Phi's)
  if( SplitIfBlocks && do_split_ifs ) {
    visited.Clear();
    split_if_with_blocks( visited, nstack );
    NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
  }

  if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
    C->set_major_progress();
  }

  // Perform loop predication before iteration splitting
  if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) {
    _ltree_root->_child->loop_predication(this);
  }

  if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) {
    if (do_intrinsify_fill()) {
      C->set_major_progress();
    }
  }

  // Perform iteration-splitting on inner loops.  Split iterations to avoid
  // range checks or one-shot null checks.

  // If split-if's didn't hack the graph too bad (no CFG changes)
  // then do loop opts.
  if (C->has_loops() && !C->major_progress()) {
    memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
    _ltree_root->_child->iteration_split( this, worklist );
    // No verify after peeling!  GCM has hoisted code out of the loop.
    // After peeling, the hoisted code could sink inside the peeled area.
    // The peeling code does not try to recompute the best location for
    // all the code before the peeled area, so the verify pass will always
    // complain about it.
  }
  // Do verify graph edges in any case
  NOT_PRODUCT( C->verify_graph_edges(); );

  if (!do_split_ifs) {
    // We saw major progress in Split-If to get here.  We forced a
    // pass with unrolling and not split-if, however more split-if's
    // might make progress.  If the unrolling didn't make progress
    // then the major-progress flag got cleared and we won't try
    // another round of Split-If.  In particular the ever-common
    // instance-of/check-cast pattern requires at least 2 rounds of
    // Split-If to clear out.
    C->set_major_progress();
  }

  // Repeat loop optimizations if new loops were seen
  if (created_loop_node()) {
    C->set_major_progress();
  }

  // Keep loop predicates and perform optimizations with them
  // until no more loop optimizations could be done.
  // After that switch predicates off and do more loop optimizations.
  if (!C->major_progress() && (C->predicate_count() > 0)) {
     C->cleanup_loop_predicates(_igvn);
#ifndef PRODUCT
     if (TraceLoopOpts) {
       tty->print_cr("PredicatesOff");
     }
#endif
     C->set_major_progress();
  }

  // Convert scalar to superword operations at the end of all loop opts.
  if (UseSuperWord && C->has_loops() && !C->major_progress()) {
    // SuperWord transform
    SuperWord sw(this);
    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
      IdealLoopTree* lpt = iter.current();
      if (lpt->is_counted()) {
        sw.transform_loop(lpt);
      }
    }
  }

  // Cleanup any modified bits
  _igvn.optimize();

  // disable assert until issue with split_flow_path is resolved (6742111)
  // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(),
  //        "shouldn't introduce irreducible loops");

  if (C->log() != NULL) {
    log_loop_tree(_ltree_root, _ltree_root, C->log());
  }
}

#ifndef PRODUCT
//------------------------------print_statistics-------------------------------
int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes
int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique
void PhaseIdealLoop::print_statistics() {
  tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d", _loop_invokes, _loop_work);
}

//------------------------------verify-----------------------------------------
// Build a verify-only PhaseIdealLoop, and see that it agrees with me.
static int fail;                // debug only; not multi-thread safe, but we don't care
void PhaseIdealLoop::verify() const {
  int old_progress = C->major_progress();
  ResourceMark rm;
  PhaseIdealLoop loop_verify( _igvn, this );
  VectorSet visited(Thread::current()->resource_area());

  fail = 0;
  verify_compare( C->root(), &loop_verify, visited );
  assert( fail == 0, "verify loops failed" );
  // Verify loop structure is the same
  _ltree_root->verify_tree(loop_verify._ltree_root, NULL);
  // Reset major-progress.  It was cleared by creating a verify version of
  // PhaseIdealLoop.
  for( int i=0; i<old_progress; i++ )
    C->set_major_progress();
}

//------------------------------verify_compare---------------------------------
// Make sure me and the given PhaseIdealLoop agree on key data structures
void PhaseIdealLoop::verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const {
  if( !n ) return;
  if( visited.test_set( n->_idx ) ) return;
  if( !_nodes[n->_idx] ) {      // Unreachable
    assert( !loop_verify->_nodes[n->_idx], "both should be unreachable" );
    return;
  }

  uint i;
  for( i = 0; i < n->req(); i++ )
    verify_compare( n->in(i), loop_verify, visited );

  // Check the '_nodes' block/loop structure
  i = n->_idx;
  if( has_ctrl(n) ) {           // We have control; verify has loop or ctrl
    if( _nodes[i] != loop_verify->_nodes[i] &&
        get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) {
      tty->print("Mismatched control setting for: ");
      n->dump();
      if( fail++ > 10 ) return;
      Node *c = get_ctrl_no_update(n);
      tty->print("We have it as: ");
      if( c->in(0) ) c->dump();
        else tty->print_cr("N%d",c->_idx);
      tty->print("Verify thinks: ");
      if( loop_verify->has_ctrl(n) )
        loop_verify->get_ctrl_no_update(n)->dump();
      else
        loop_verify->get_loop_idx(n)->dump();
      tty->cr();
    }
  } else {                    // We have a loop
    IdealLoopTree *us = get_loop_idx(n);
    if( loop_verify->has_ctrl(n) ) {
      tty->print("Mismatched loop setting for: ");
      n->dump();
      if( fail++ > 10 ) return;
      tty->print("We have it as: ");
      us->dump();
      tty->print("Verify thinks: ");
      loop_verify->get_ctrl_no_update(n)->dump();
      tty->cr();
    } else if (!C->major_progress()) {
      // Loop selection can be messed up if we did a major progress
      // operation, like split-if.  Do not verify in that case.
      IdealLoopTree *them = loop_verify->get_loop_idx(n);
      if( us->_head != them->_head ||  us->_tail != them->_tail ) {
        tty->print("Unequals loops for: ");
        n->dump();
        if( fail++ > 10 ) return;
        tty->print("We have it as: ");
        us->dump();
        tty->print("Verify thinks: ");
        them->dump();
        tty->cr();
      }
    }
  }

  // Check for immediate dominators being equal
  if( i >= _idom_size ) {
    if( !n->is_CFG() ) return;
    tty->print("CFG Node with no idom: ");
    n->dump();
    return;
  }
  if( !n->is_CFG() ) return;
  if( n == C->root() ) return; // No IDOM here

  assert(n->_idx == i, "sanity");
  Node *id = idom_no_update(n);
  if( id != loop_verify->idom_no_update(n) ) {
    tty->print("Unequals idoms for: ");
    n->dump();
    if( fail++ > 10 ) return;
    tty->print("We have it as: ");
    id->dump();
    tty->print("Verify thinks: ");
    loop_verify->idom_no_update(n)->dump();
    tty->cr();
  }

}

//------------------------------verify_tree------------------------------------
// Verify that tree structures match.  Because the CFG can change, siblings
// within the loop tree can be reordered.  We attempt to deal with that by
// reordering the verify's loop tree if possible.
void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const {
  assert( _parent == parent, "Badly formed loop tree" );

  // Siblings not in same order?  Attempt to re-order.
  if( _head != loop->_head ) {
    // Find _next pointer to update
    IdealLoopTree **pp = &loop->_parent->_child;
    while( *pp != loop )
      pp = &((*pp)->_next);
    // Find proper sibling to be next
    IdealLoopTree **nn = &loop->_next;
    while( (*nn) && (*nn)->_head != _head )
      nn = &((*nn)->_next);

    // Check for no match.
    if( !(*nn) ) {
      // Annoyingly, irreducible loops can pick different headers
      // after a major_progress operation, so the rest of the loop
      // tree cannot be matched.
      if (_irreducible && Compile::current()->major_progress())  return;
      assert( 0, "failed to match loop tree" );
    }

    // Move (*nn) to (*pp)
    IdealLoopTree *hit = *nn;
    *nn = hit->_next;
    hit->_next = loop;
    *pp = loop;
    loop = hit;
    // Now try again to verify
  }

  assert( _head  == loop->_head , "mismatched loop head" );
  Node *tail = _tail;           // Inline a non-updating version of
  while( !tail->in(0) )         // the 'tail()' call.
    tail = tail->in(1);
  assert( tail == loop->_tail, "mismatched loop tail" );
