
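// Clean an inline cache whose cached metadata belongs to a class loader that
// is no longer alive.  The cached metadata is a CompiledICHolder, a Klass, or
// a Method; if its loader is still alive the IC is left untouched.  When
// mark_on_stack is set (used for RedefineClasses, see do_unloading_parallel),
// the metadata is also recorded as being on stack so that unused "previous
// versions" are not purged.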
static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
  if (ic->is_icholder_call()) {
    // The one exception is CompiledICHolder metadata, which may
    // still be marked on stack below (we check its liveness further down).
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (mark_on_stack) {
      Metadata::mark_on_stack(cichk_oop->holder_metadata());
      Metadata::mark_on_stack(cichk_oop->holder_klass());
    }

    if (cichk_oop->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (mark_on_stack) {
        Metadata::mark_on_stack(ic_oop);
      }

      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in a local flag.
  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  if (a_class_was_redefined) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive, false);
      }
    }
  }

  // Compiled code
  {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation* r = iter.oop_reloc();
        // In this loop, we must only traverse those oops directly embedded in
        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
        assert(1 == (r->oop_is_immediate()) +
                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != NULL) {
          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
            return;
          }
        }
      }
    }
  }

  // Scopes
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
    if (can_unload(is_alive, p, unloading_occurred)) {
      return;
    }
  }

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}

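// Clean a call site (CompiledIC or CompiledStaticCall, selected by the
// template parameter) whose destination is an nmethod that is no longer in
// use.  Returns true if the work must be postponed because the target
// nmethod has not yet been visited in the current unloading pass (its
// unloading clock differs from the global one); such call sites are
// revisited in do_unloading_parallel_postponed().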
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  if (cb != NULL && cb->is_nmethod()) {
    nmethod* nm = (nmethod*)cb;

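    // Each unloading pass bumps a global clock, and an nmethod is stamped
    // with the clock value once it has been processed.  A mismatch means the
    // target is still pending in the current pass, so cleaning this call
    // site is postponed (handled in do_unloading_parallel_postponed).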
    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}

bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");

  oop_Relocation* r = iter_at_oop->oop_reloc();
  // Traverse those oops directly embedded in the code.
  // Other oops (oop_index>0) are seen as part of scopes_oops.
  assert(1 == (r->oop_is_immediate()) +
         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
         "oop must be found in exactly one place");
  if (r->oop_is_immediate() && r->oop_value() != NULL) {
    // Unload this nmethod if the oop is dead.
    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
      return true;
    }
  }

  return false;
}

void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
  assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");

  metadata_Relocation* r = iter_at_metadata->metadata_reloc();
  // We must only follow metadata directly embedded in the code.
  // Other metadata (oop_index>0) is seen as part of
  // the metadata section below.
  assert(1 == (r->metadata_is_immediate()) +
         (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
         "metadata must be found in exactly one place");
  if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
    Metadata* md = r->metadata_value();
    if (md != _method) Metadata::mark_on_stack(md);
  }
}

void nmethod::mark_metadata_on_stack_non_relocs() {
  // Visit the metadata section
  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
    Metadata* md = *p;
    Metadata::mark_on_stack(md);
  }

  // Visit metadata not embedded in the other places.
  if (_method != NULL) Metadata::mark_on_stack(_method);
}

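// Parallel counterpart of do_unloading(), run by GC worker threads.  Returns
// true if any inline-cache cleaning was postponed because the IC's target
// nmethod had not been processed yet; do_unloading_parallel_postponed() must
// then run once all nmethods have been visited.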
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in a local flag.
  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
  if (a_class_was_redefined) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // When class redefinition is used all metadata in the CodeCache has to be recorded,
  // so that unused "previous versions" can be purged. Since walking the CodeCache can
  // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
  bool mark_metadata_on_stack = a_class_was_redefined;

  // Exception cache
  clean_exception_cache(is_alive);

  bool is_unloaded = false;
  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      if (!is_unloaded) {
        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
      }
      break;

    case relocInfo::metadata_type:
      if (mark_metadata_on_stack) {
        mark_metadata_on_stack_at(&iter);
      }
      break;
    }
  }

  if (mark_metadata_on_stack) {
    mark_metadata_on_stack_non_relocs();
  }

  if (is_unloaded) {
    return postponed;
  }

  // Scopes
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
    if (can_unload(is_alive, p, unloading_occurred)) {
      is_unloaded = true;
      break;
    }
  }

  if (is_unloaded) {
    return postponed;
  }

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}

void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;
    }
  }
}

#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check that the class_loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint, so it can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT


// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}


// Iterate over metadata calling this function.   Used by RedefineClasses
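// For example, CheckClass::do_check_class (above) passes its check_class
// function:  nm->metadata_do(check_class);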
void nmethod::metadata_do(void f(Metadata*)) {
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }
  {
    // Visit all immediate references that are embedded in the instruction stream.
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type ) {
        metadata_Relocation* r = iter.metadata_reloc();
        // In this loop, we must only follow metadata directly embedded in
        // the code.  Other metadata (oop_index>0) is seen as part of
        // the metadata section below.
        assert(1 == (r->metadata_is_immediate()) +
               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
               "metadata must be found in exactly one place");
        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
          Metadata* md = r->metadata_value();
          if (md != _method) f(md);
        }
      } else if (iter.type() == relocInfo::virtual_call_type) {
        // Check compiledIC holders associated with this nmethod
        ResourceMark rm;
        CompiledIC *ic = CompiledIC_at(&iter);
        if (ic->is_icholder_call()) {
          CompiledICHolder* cichk = ic->cached_icholder();
          f(cichk->holder_metadata());
          f(cichk->holder_klass());
        } else {
          Metadata* ic_oop = ic->cached_metadata();
          if (ic_oop != NULL) {
            f(ic_oop);
          }
        }
      }
    }
  }

  // Visit the metadata section
  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
    Metadata* md = *p;
    f(md);
  }

  // Visit the Method* itself; it is not embedded in the other places.
  if (_method != NULL) f(_method);
}

void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
  // Make sure the oops are ready to receive visitors
  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
  assert(!is_unloaded(), "should not call follow on unloaded nmethod");

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);

  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type ) {
      oop_Relocation* r = iter.oop_reloc();
      // In this loop, we must only follow those oops directly embedded in
      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
      assert(1 == (r->oop_is_immediate()) +
                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
             "oop must be found in exactly one place");
      if (r->oop_is_immediate() && r->oop_value() != NULL) {
        f->do_oop(r->oop_addr());
      }
    }
  }

  // Scopes
  // This includes oop constants not inlined in the code stream.
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
    f->do_oop(p);
  }
}

#define NMETHOD_SENTINEL ((nmethod*)badAddress)

nmethod* volatile nmethod::_oops_do_mark_nmethods;

// An nmethod is "marked" if its _mark_link is set non-null.
// Even if it is the end of the linked list, it will have a non-null link value,
// as long as it is on the list.
// This code must be MP safe, because it is used from parallel GC passes.
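// Claiming is a two-step CAS protocol: first swing _oops_do_mark_link from
// NULL to a sentinel to claim this nmethod, then push the nmethod onto the
// global _oops_do_mark_nmethods list.  Returns true if another thread had
// already claimed it.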
bool nmethod::test_set_oops_do_mark() {
  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
  nmethod* observed_mark_link = _oops_do_mark_link;
  if (observed_mark_link == NULL) {
    // Claim this nmethod for this thread to mark.
    observed_mark_link = (nmethod*)
      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
    if (observed_mark_link == NULL) {

      // Atomically append this nmethod (now claimed) to the head of the list:
      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
      for (;;) {
        nmethod* required_mark_nmethods = observed_mark_nmethods;
        _oops_do_mark_link = required_mark_nmethods;
        observed_mark_nmethods = (nmethod*)
          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
        if (observed_mark_nmethods == required_mark_nmethods)
          break;
      }
      // Mark was clear when we first saw this guy.
      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
      return false;
    }
  }
  // On fall through, another racing thread marked this nmethod before we did.
  return true;
}

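// The prologue/epilogue pair brackets a GC's oops_do traversal, roughly
// (a sketch of the intended call sequence, not a verbatim call site):
//   nmethod::oops_do_marking_prologue();
//   ... worker threads claim nmethods via test_set_oops_do_mark() ...
//   nmethod::oops_do_marking_epilogue();   // unmarks every claimed nmethod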
void nmethod::oops_do_marking_prologue() {
  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  // We use cmpxchg_ptr instead of regular assignment here because the user
  // may fork a bunch of threads, and we need them all to see the same state.
  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
  guarantee(observed == NULL, "no races in this sequential code");
}

void nmethod::oops_do_marking_epilogue() {
  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
  nmethod* cur = _oops_do_mark_nmethods;
  while (cur != NMETHOD_SENTINEL) {
    assert(cur != NULL, "not NULL-terminated");
    nmethod* next = cur->_oops_do_mark_link;
    cur->_oops_do_mark_link = NULL;
    DEBUG_ONLY(cur->verify_oop_relocations());
    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
    cur = next;
  }
  void* required = _oops_do_mark_nmethods;
  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
  guarantee(observed == required, "no races in this sequential code");
  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
}

class DetectScavengeRoot: public OopClosure {
  bool     _detected_scavenge_root;
public:
  DetectScavengeRoot() : _detected_scavenge_root(false)
  { NOT_PRODUCT(_print_nm = NULL); }
  bool detected_scavenge_root() { return _detected_scavenge_root; }
  virtual void do_oop(oop* p) {
    if ((*p) != NULL && (*p)->is_scavengable()) {
      NOT_PRODUCT(maybe_print(p));
      _detected_scavenge_root = true;
    }
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }

#ifndef PRODUCT
  nmethod* _print_nm;
  void maybe_print(oop* p) {
    if (_print_nm == NULL)  return;
    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                  (void *)(*p), (intptr_t)p);
    (*p)->print();
  }
#endif //PRODUCT
};

bool nmethod::detect_scavenge_root_oops() {
  DetectScavengeRoot detect_scavenge_root;
  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
  oops_do(&detect_scavenge_root);
  return detect_scavenge_root.detected_scavenge_root();
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (!method()->is_native()) {
    SimpleScopeDesc ssd(this, fr.pc());
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();
    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}


oop nmethod::embeddedOop_at(u_char* p) {
  RelocIterator iter(this, p, p + 1);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      return iter.oop_reloc()->oop_value();
    }
  }
  return NULL;
}


inline bool includes(void* p, void* from, void* to) {
  return from <= p && p < to;
}


void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
  assert(count >= 2, "must be sentinel values, at least");

#ifdef ASSERT
  // must be sorted and unique; we do a binary search in find_pc_desc()
  int prev_offset = pcs[0].pc_offset();
  assert(prev_offset == PcDesc::lower_offset_limit,
         "must start with a sentinel");
  for (int i = 1; i < count; i++) {
    int this_offset = pcs[i].pc_offset();
    assert(this_offset > prev_offset, "offsets must be sorted");
    prev_offset = this_offset;
  }
  assert(prev_offset == PcDesc::upper_offset_limit,
         "must end with a sentinel");
#endif //ASSERT

  // Search for MethodHandle invokes and tag the nmethod.
  for (int i = 0; i < count; i++) {
    if (pcs[i].is_method_handle_invoke()) {
      set_has_method_handle_invokes(true);
      break;
    }
  }
  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");

  int size = count * sizeof(PcDesc);
  assert(scopes_pcs_size() >= size, "oob");
  memcpy(scopes_pcs_begin(), pcs, size);

  // Adjust the final sentinel downward.
  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
  last_pc->set_pc_offset(content_size() + 1);
  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
    // Fill any rounding gaps with copies of the last record.
    last_pc[1] = last_pc[0];
  }
  // The following assert could fail if sizeof(PcDesc) is not
  // an integral multiple of oopSize (the rounding term).
  // If it fails, change the logic to always allocate a multiple
  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
}

void nmethod::copy_scopes_data(u_char* buffer, int size) {
  assert(scopes_data_size() >= size, "oob");
  memcpy(scopes_data_begin(), buffer, size);
}


#ifdef ASSERT
static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
  PcDesc* lower = nm->scopes_pcs_begin();
  PcDesc* upper = nm->scopes_pcs_end();
  lower += 1; // exclude initial sentinel
  PcDesc* res = NULL;
  for (PcDesc* p = lower; p < upper; p++) {
    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
    if (match_desc(p, pc_offset, approximate)) {
      if (res == NULL)
        res = p;
      else
        res = (PcDesc*) badAddress;
    }
  }
  return res;
}
#endif


// Finds a PcDesc with real-pc equal to "pc"
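// When "approximate" is true, the first PcDesc at or after pc is accepted
// rather than requiring an exact match (pc_desc_near relies on this).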
PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
  address base_address = code_begin();
  if ((pc < base_address) ||
      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
    return NULL;  // PC is wildly out of range
  }
  int pc_offset = (int) (pc - base_address);

  // Check whether the PcDesc cache contains the desired PcDesc
  // (this has an almost 100% hit rate)
  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
  if (res != NULL) {
    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
    return res;
  }

  // Fallback algorithm: quasi-linear search for the PcDesc
  // Find the last pc_offset less than the given offset.
  // The successor must be the required match, if there is a match at all.
  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
  PcDesc* lower = scopes_pcs_begin();
  PcDesc* upper = scopes_pcs_end();
  upper -= 1; // exclude final sentinel
  if (lower >= upper)  return NULL;  // native method; no PcDescs at all

#define assert_LU_OK \
  /* invariant on lower..upper during the following search: */ \
  assert(lower->pc_offset() <  pc_offset, "sanity"); \
  assert(upper->pc_offset() >= pc_offset, "sanity")
  assert_LU_OK;

  // Use the last successful return as a split point.
  PcDesc* mid = _pc_desc_cache.last_pc_desc();
  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  if (mid->pc_offset() < pc_offset) {
    lower = mid;
  } else {
    upper = mid;
  }

  // Take giant steps at first (4096, then 256, then 16, then 1)
  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
  const int RADIX = (1 << LOG2_RADIX);
  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
    while ((mid = lower + step) < upper) {
      assert_LU_OK;
      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
      if (mid->pc_offset() < pc_offset) {
        lower = mid;
      } else {
        upper = mid;
        break;
      }
    }
    assert_LU_OK;
  }

  // Sneak up on the value with a linear search of length ~16.
  while (true) {
    assert_LU_OK;
    mid = lower + 1;
    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
    if (mid->pc_offset() < pc_offset) {
      lower = mid;
    } else {
      upper = mid;
      break;
    }
  }
#undef assert_LU_OK

  if (match_desc(upper, pc_offset, approximate)) {
    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
    _pc_desc_cache.add_pc_desc(upper);
    return upper;
  } else {
    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
    return NULL;
  }
}


bool nmethod::check_all_dependencies() {
  bool found_check = false;
  // wholesale check of all dependencies
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.check_dependency() != NULL) {
      found_check = true;
      NOT_DEBUG(break);
    }
  }
  return found_check;  // tell caller if we found anything
}

bool nmethod::check_dependency_on(DepChange& changes) {
  // What has happened:
  // 1) a new class dependee has been added
  // 2) dependee and all its super classes have been marked
  bool found_check = false;  // set true if we are upset
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    // Evaluate only relevant dependencies.
    if (deps.spot_check_dependency_at(changes) != NULL) {
      found_check = true;
      NOT_DEBUG(break);
    }
  }
  return found_check;
}

bool nmethod::is_evol_dependent_on(Klass* dependee) {
  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
  Array<Method*>* dependee_methods = dependee_ik->methods();
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.type() == Dependencies::evol_method) {
      Method* method = deps.method_argument(0);
      for (int j = 0; j < dependee_methods->length(); j++) {
        if (dependee_methods->at(j) == method) {
          // RC_TRACE macro has an embedded ResourceMark
          RC_TRACE(0x01000000,
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
            _method->method_holder()->external_name(),
            _method->name()->as_C_string(),
            _method->signature()->as_C_string(), compile_id(),
            method->method_holder()->external_name(),
            method->name()->as_C_string(),
            method->signature()->as_C_string()));
          if (TraceDependencies || LogCompilation)
            deps.log_dependency(dependee);
          return true;
        }
      }
    }
  }
  return false;
}

// Called from mark_for_deoptimization, when dependee is invalidated.
bool nmethod::is_dependent_on_method(Method* dependee) {
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.type() != Dependencies::evol_method)
      continue;
    Method* method = deps.method_argument(0);
    if (method == dependee) return true;
  }
  return false;
}


bool nmethod::is_patchable_at(address instr_addr) {
  assert(insts_contains(instr_addr), "wrong nmethod used");
  if (is_zombie()) {
    // a zombie may never be patched
    return false;
  }
  return true;
}


address nmethod::continuation_for_implicit_exception(address pc) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = ThreadLocalStorage::get_thread_slow();
    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
    HandleMark hm(thread);
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  return code_begin() + cont_offset;
}



void nmethod_init() {
  // make sure you didn't forget to adjust the filler fields
  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
}


//-------------------------------------------------------------------------------------------


// QQQ might we make this work from a frame??
nmethodLocker::nmethodLocker(address pc) {
  CodeBlob* cb = CodeCache::find_blob(pc);
  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
  _nm = (nmethod*)cb;
  lock_nmethod(_nm);
}
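// nmethodLocker is a stack-allocated guard: the constructor bumps the
// nmethod's _lock_count so it is not flushed out from under the caller, and
// the matching unlock_nmethod() call is made when the locker goes out of
// scope (destructor, declared in nmethod.hpp).  Illustrative use:
//   nmethodLocker nml(pc);   // pins the nmethod containing pc
//   ...                      // safe to inspect the nmethod here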

// Only JvmtiDeferredEvent::compiled_method_unload_event()
// should pass zombie_ok == true.
void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
  if (nm == NULL)  return;
  Atomic::inc(&nm->_lock_count);
  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
}

void nmethodLocker::unlock_nmethod(nmethod* nm) {
  if (nm == NULL)  return;
  Atomic::dec(&nm->_lock_count);
  guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
}


// -----------------------------------------------------------------------------
// nmethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address nmethod::get_deopt_original_pc(const frame* fr) {
  if (fr->cb() == NULL)  return NULL;

  nmethod* nm = fr->cb()->as_nmethod_or_null();
  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
    return nm->get_original_pc(fr);

  return NULL;
}


// -----------------------------------------------------------------------------
// MethodHandle

bool nmethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}


// -----------------------------------------------------------------------------
// Verification

class VerifyOopsClosure: public OopClosure {
  nmethod* _nm;
  bool     _ok;
public:
  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
  bool ok() { return _ok; }
  virtual void do_oop(oop* p) {
    if ((*p) == NULL || (*p)->is_oop())  return;
    if (_ok) {
      _nm->print_nmethod(true);
      _ok = false;
    }
    tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

void nmethod::verify() {

  // Hmm: OSR methods can be deopted but not marked as zombie or not_entrant,
  // which seems odd.

  if (is_zombie() || is_not_entrant() || is_unloaded())
    return;

  // Make sure all the entry points are correctly aligned for patching.
  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());

  // assert(method()->is_oop(), "must be valid");

  ResourceMark rm;

  if (!CodeCache::contains(this)) {
    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
  }

  if (is_native_method())
    return;

  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
  if (nm != this) {
    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
                  this));
  }

  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (!p->verify(this)) {
      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
    }
  }

  VerifyOopsClosure voc(this);
  oops_do(&voc);
  assert(voc.ok(), "embedded oops must be OK");
  verify_scavenge_root_oops();

  verify_scopes();
}


void nmethod::verify_interrupt_point(address call_site) {
  // Verify IC only when nmethod installation is finished.
  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
  if (is_installed) {
    Thread *cur = Thread::current();
    if (CompiledIC_lock->owner() == cur ||
        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
         SafepointSynchronize::is_at_safepoint())) {
      CompiledIC_at(this, call_site);
      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
    } else {
      MutexLocker ml_verify (CompiledIC_lock);
      CompiledIC_at(this, call_site);
    }
  }

  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
  assert(pd != NULL, "PcDesc must exist");
  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
                                     pd->obj_decode_offset(), pd->should_reexecute(),
                                     pd->return_oop());
       !sd->is_top(); sd = sd->sender()) {
    sd->verify();
  }
}

void nmethod::verify_scopes() {
  if (!method()) return;        // Runtime stubs have no scope
  if (method()->is_native()) return; // Ignore stub methods.
  // Iterate through all interrupt points
  // and verify that the debug information is valid.
  RelocIterator iter((nmethod*)this);
  while (iter.next()) {
    address stub = NULL;
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
        verify_interrupt_point(iter.addr());
        break;
      case relocInfo::opt_virtual_call_type:
        stub = iter.opt_virtual_call_reloc()->static_stub();
        verify_interrupt_point(iter.addr());
        break;
      case relocInfo::static_call_type:
        stub = iter.static_call_reloc()->static_stub();
        //verify_interrupt_point(iter.addr());
        break;
      case relocInfo::runtime_call_type: {
        address destination = iter.reloc()->value();
        // Right now there is no way to find out which entries support
        // an interrupt point.  It would be nice if we had this
        // information in a table.
        break;
      }
    }
    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
  }
}


// -----------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

class DebugScavengeRoot: public OopClosure {
  nmethod* _nm;
  bool     _ok;
public:
  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
  bool ok() { return _ok; }
  virtual void do_oop(oop* p) {
    if ((*p) == NULL || !(*p)->is_scavengable())  return;
    if (_ok) {
      _nm->print_nmethod(true);
      _ok = false;
    }
    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
    (*p)->print();
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

void nmethod::verify_scavenge_root_oops() {
  if (UseG1GC) {
    return;
  }

  if (!on_scavenge_root_list()) {
    // Actually look inside, to verify the claim that it's clean.
    DebugScavengeRoot debug_scavenge_root(this);
    oops_do(&debug_scavenge_root);
    if (!debug_scavenge_root.ok())
      fatal("found an unadvertised bad scavengable oop in the code cache");
  }
  assert(scavenge_root_not_marked(), "");
}

#endif // PRODUCT

// Printing operations

void nmethod::print() const {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block

  tty->print("Compiled method ");

  if (is_compiled_by_c1()) {
    tty->print("(c1) ");
  } else if (is_compiled_by_c2()) {
    tty->print("(c2) ");
  } else if (is_compiled_by_shark()) {
    tty->print("(shark) ");
  } else {
    tty->print("(nm) ");
  }

  print_on(tty, NULL);

  if (WizardMode) {
    tty->print("((nmethod*) " INTPTR_FORMAT ") ", this);
    tty->print(" for method " INTPTR_FORMAT , (address)method());
    tty->print(" { ");
    if (is_in_use())      tty->print("in_use ");
    if (is_not_entrant()) tty->print("not_entrant ");
    if (is_zombie())      tty->print("zombie ");
    if (is_unloaded())    tty->print("unloaded ");
    if (on_scavenge_root_list())  tty->print("scavenge_root ");
    tty->print_cr("}:");
  }
  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              (address)this,
                                              (address)this + size(),
                                              size());
  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              relocation_begin(),
                                              relocation_end(),
                                              relocation_size());
  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              consts_begin(),
                                              consts_end(),
                                              consts_size());
  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              insts_begin(),
                                              insts_end(),
                                              insts_size());
  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              stub_begin(),
                                              stub_end(),
                                              stub_size());
  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              oops_begin(),
                                              oops_end(),
                                              oops_size());
  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              metadata_begin(),
                                              metadata_end(),
                                              metadata_size());
  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              scopes_data_begin(),
                                              scopes_data_end(),
                                              scopes_data_size());
  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              scopes_pcs_begin(),
                                              scopes_pcs_end(),
                                              scopes_pcs_size());
  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              dependencies_begin(),
                                              dependencies_end(),
                                              dependencies_size());
  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              handler_table_begin(),
                                              handler_table_end(),
                                              handler_table_size());
  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
                                              nul_chk_table_begin(),
                                              nul_chk_table_end(),
                                              nul_chk_table_size());
}

#ifndef PRODUCT

void nmethod::print_scopes() {
  // Find the first pc desc for all scopes in the code and print it.
  ResourceMark rm;
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
      continue;

    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
    sd->print_on(tty, p);
  }
}

void nmethod::print_dependencies() {
  ResourceMark rm;
  ttyLocker ttyl;   // keep the following output all in one block
  tty->print_cr("Dependencies:");
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    deps.print_dependency();
    Klass* ctxk = deps.context_type();
    if (ctxk != NULL) {
      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
      }
    }
    deps.log_dependency();  // put it into the xml log also
  }
}


void nmethod::print_relocations() {
  ResourceMark m;       // in case methods get printed via the debugger
  tty->print_cr("relocations:");
  RelocIterator iter(this);
  iter.print();
  if (UseRelocIndex) {
    jint* index_end   = (jint*)relocation_end() - 1;
    jint  index_size  = *index_end;
    jint* index_start = (jint*)( (address)index_end - index_size );
    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
    if (index_size > 0) {
      jint* ip;
      for (ip = index_start; ip+2 <= index_end; ip += 2)
        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
                      ip[0],
                      ip[1],
                      header_end()+ip[0],
                      relocation_begin()-1+ip[1]);
      for (; ip < index_end; ip++)
        tty->print_cr("  (%d ?)", ip[0]);
      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
      ip++;
      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
    }
  }
}


void nmethod::print_pcs() {
  ResourceMark m;       // in case methods get printed via debugger
  tty->print_cr("pc-bytecode offsets:");
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    p->print(this);
  }
}

#endif // PRODUCT

const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  RelocIterator iter(this, begin, end);
  bool have_one = false;
  while (iter.next()) {
    have_one = true;
    switch (iter.type()) {
        case relocInfo::none:                  return "no_reloc";
        case relocInfo::oop_type: {
          stringStream st;
          oop_Relocation* r = iter.oop_reloc();
          oop obj = r->oop_value();
          st.print("oop(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::metadata_type: {
          stringStream st;
          metadata_Relocation* r = iter.metadata_reloc();
          Metadata* obj = r->metadata_value();
          st.print("metadata(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::virtual_call_type:     return "virtual_call";
        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
        case relocInfo::static_call_type:      return "static_call";
        case relocInfo::static_stub_type:      return "static_stub";
        case relocInfo::runtime_call_type:     return "runtime_call";
        case relocInfo::external_word_type:    return "external_word";
        case relocInfo::internal_word_type:    return "internal_word";
        case relocInfo::section_word_type:     return "section_word";
        case relocInfo::poll_type:             return "poll";
        case relocInfo::poll_return_type:      return "poll_return";
        case relocInfo::type_mask:             return "type_bit_mask";
    }
  }
  return have_one ? "other" : NULL;
}

// Return the last scope in (begin..end]
ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  PcDesc* p = pc_desc_near(begin+1);
  if (p != NULL && p->real_pc(this) <= end) {
    return new ScopeDesc(this, p->scope_decode_offset(),
                         p->obj_decode_offset(), p->should_reexecute(),
                         p->return_oop());
  }
  return NULL;
}

void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");

  if (has_method_handle_invokes())
    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");

  if (block_begin == consts_begin())            stream->print_cr("[Constants]");

  if (block_begin == entry_point()) {
    methodHandle m = method();
    if (m.not_null()) {
      stream->print("  # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m.not_null() && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "");
      }
      const char* spname = "sp"; // make arch-specific?
      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print("  # this: ");
        else
          stream->print("  # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid())  {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset)  at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (!at_this && ss.is_object()) {
            Symbol* name = ss.as_symbol_or_null();
            if (name != NULL) {
              name->print_value_on(stream);
              did_name = true;
            }
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print("  (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this)  ss.next();
      }
      if (!did_old_sp) {
        stream->print("  # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print("  (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}

void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  // First, find an oopmap in (begin, end].
  // We use the odd half-closed interval so that oop maps and scope descs
  // which are tied to the byte after a call are printed with the call itself.
  address base = code_begin();
  OopMapSet* oms = oop_maps();
  if (oms != NULL) {
    for (int i = 0, imax = oms->size(); i < imax; i++) {
      OopMap* om = oms->at(i);
      address pc = base + om->offset();
      if (pc > begin) {
        if (pc <= end) {
          st->move_to(column);
          st->print("; ");
          om->print_on(st);
        }
        break;
      }
    }
  }

  // Print any debug info present at this pc.
  ScopeDesc* sd  = scope_desc_in(begin, end);
  if (sd != NULL) {
    st->move_to(column);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else {
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(sd->method(), sd->bci());
            st->print(" ");
            if (invoke.name() != NULL)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(sd->method(), sd->bci());
            st->print(" ");
            if (field.name() != NULL)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
          }
        }
      }
    }

    // Print all scopes
    for (;sd != NULL; sd = sd->sender()) {
      st->move_to(column);
      st->print("; -");
      if (sd->method() == NULL) {
        st->print("method is NULL");
        st->print("@%d", sd->bci());
      } else {
        sd->method()->print_short_name(st);
        int lineno = sd->method()->line_number_from_bci(sd->bci());
        if (lineno != -1) {
          st->print("@%d (line %d)", sd->bci(), lineno);
        } else {
          st->print("@%d", sd->bci());
        }
      }
      st->cr();
    }
  }

  // Print relocation information
  const char* str = reloc_string_for(begin, end);
  if (str != NULL) {
    if (sd != NULL) st->cr();
    st->move_to(column);
    st->print(";   {%s}", str);
  }
  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  if (cont_offset != 0) {
    st->move_to(column);
    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
  }

}

#ifndef PRODUCT

void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, NULL);
}

void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      VerifyMutexLocker mc(CompiledIC_lock);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
      compiledStaticCall_at(iter.reloc())->print();
      break;
    }
  }
}

void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print();
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}

void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  nmethod_stats.print_native_nmethod_stats();
  nmethod_stats.print_nmethod_stats();
  DebugInformationRecorder::print_statistics();
  nmethod_stats.print_pc_stats();
  Dependencies::print_statistics();
  if (xtty != NULL)  xtty->tail("statistics");
}

#endif // PRODUCT
C:\hotspot-69087d08d473\src\share\vm\code\nmethod.hpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return OrderAccess::load_acquire(&_count); }
  // increment_count is only called under lock, but there may be concurrent readers.
  void    increment_count()                    { OrderAccess::release_store(&_count, _count + 1); }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
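
// A minimal sketch of how the acquire/release pair above is intended to be
// used (the real code lives in add_address_and_handler/test_address):
//
//   // writer, under a lock: fill the slot first, then publish it
//   set_pc_at(count(), addr);
//   set_handler_at(count(), handler);
//   increment_count();                 // release_store of the new count
//
//   // lock-free reader: the load_acquire in count() guarantees that all
//   // slots below the observed count are fully initialized
//   for (int i = 0; i < count(); i++)
//     if (pc_at(i) == addr) return handler_at(i);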


// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; without volatile, the C++
  // compiler (namely xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has been observed to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};


// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;       // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization;           // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
#endif

  enum { in_use       = 0,   // executable nmethod
         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie       = 2,   // no activations exist, nmethod is ready for purge
         unloaded     = 3 }; // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
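
  // The legal state transitions implied by the comments above:
  //
  //   in_use -> not_entrant -> zombie     (deoptimization path)
  //   in_use -> unloaded    -> zombie     (class-unloading path)
  //
  // with zombies eventually flushed (purged) by the sweeper.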

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistic again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  jint  _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack.  A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of a nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
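  // For example, with ReservedCodeCacheSize == 240M the counter is reset
  // to (240 * 2) == 480 on every stack scan that finds the method active.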
  int _hotness_counter;

  ExceptionCache * volatile _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  Method* method() const                          { return _method; }
  AbstractCompiler* compiler() const              { return _compiler; }

  // type info
  bool is_nmethod() const                         { return true; }
  bool is_java_method() const                     { return !method()->is_native(); }
  bool is_native_method() const                   { return method()->is_native(); }
  bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
  address consts_end            () const          { return           header_begin() +  code_offset()        ; }
  address insts_begin           () const          { return           header_begin() +  code_offset()        ; }
  address insts_end             () const          { return           header_begin() + _stub_offset          ; }
  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
  address stub_end              () const          { return           header_begin() + _oops_offset          ; }
  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const          { return (oop*)   (header_begin() + _oops_offset)         ; }
  oop*    oops_end              () const          { return (oop*)   (header_begin() + _metadata_offset)     ; }

  Metadata** metadata_begin   () const            { return (Metadata**)  (header_begin() + _metadata_offset)     ; }
  Metadata** metadata_end     () const            { return (Metadata**)  (header_begin() + _scopes_data_offset)  ; }

  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
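
  // Note the invariant encoded above: each part's end is the next part's
  // begin, so the sections form one contiguous, ordered span:
  //   consts <= insts <= stubs <= oops <= metadata <= scopes_data
  //          <= scopes_pcs <= dependencies <= handler_table <= nul_chk_table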

  // Sizes
  int consts_size       () const                  { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const                  { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const                  { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const                  { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const                  { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int scopes_data_size  () const                  { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const                  { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const                  { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const                  { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const                  { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr) const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop*    addr) const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const   { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr) const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr) const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const                     { return _entry_point;             } // normal entry point
  address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct

  // flag accessing and manipulation
  bool  is_in_use() const                         { return _state == in_use; }
  bool  is_alive() const                          { unsigned char s = _state; return s == in_use || s == not_entrant; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded;   }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState  rtm_state() const                     { return _rtm_state; }
  void set_rtm_state(RTMState state)              { _rtm_state = state; }
#endif

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive.  It is used when an uncommon trap happens.  Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool  make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool  unload_reported()                         { return _unload_reported; }
  void  set_unload_reported()                     { _unload_reported = true; }

  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
  nmethod* unloading_next()                       { return _unloading_next; }

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
  void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }

  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()                         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies()             {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
  void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  int   comp_level() const                        { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop   oop_at(int index) const                   { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
  oop*  oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
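
  // For example, oop_at(1) returns oops_begin()[0], the first slot of the
  // embedded table, while oop_at(0) is always the shared NULL.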

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*     metadata_at(int index) const      { return index == 0 ? NULL: *metadata_addr_at(index); }
  Metadata**  metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
  void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int   osr_entry_bci() const                     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const                      { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void  invalidate_osr_method();
  nmethod* osr_link() const                       { return _osr_link; }
  void     set_osr_link(nmethod *n)               { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void clear_ic_stubs();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const                    { return _lock_count > 0; }

  // See comment at definition of _last_seen_on_stack
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  //  The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

 private:
  //  Unload an nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);

  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
  void mark_metadata_on_stack_non_relocs();

 public:
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print()                          const;
  void print_relocations()                        PRODUCT_RETURN;
  void print_pcs()                                PRODUCT_RETURN;
  void print_scopes()                             PRODUCT_RETURN;
  void print_dependencies()                       PRODUCT_RETURN;
  void print_value_on(outputStream* st) const     PRODUCT_RETURN;
  void print_calls(outputStream* st)              PRODUCT_RETURN;
  void print_handler_table()                      PRODUCT_RETURN;
  void print_nul_chk_table()                      PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics()                  PRODUCT_RETURN;

  // Compiler task identification.  Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int  compile_id() const                         { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*    IC_at(char* p) const;
  // PrimitiveIC*   primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }

  // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note:  This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};
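
// Typical RAII usage (a sketch; real clients include the ServiceThread and
// deoptimization code mentioned above):
//
//   {
//     nmethodLocker nml(nm);   // lock_nmethod() bumps nm->_lock_count
//     // ... nm cannot be flushed or made a zombie in this scope ...
//   }                          // destructor calls unlock_nmethod()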

#endif // SHARE_VM_CODE_NMETHOD_HPP
C:\hotspot-69087d08d473\src\share\vm/code/oopRecorder.cpp
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciMetadata.hpp"
#include "code/oopRecorder.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"

#ifdef ASSERT
template <class T> int ValueRecorder<T>::_find_index_calls = 0;
template <class T> int ValueRecorder<T>::_hit_indexes      = 0;
template <class T> int ValueRecorder<T>::_missed_indexes   = 0;
#endif //ASSERT


template <class T> ValueRecorder<T>::ValueRecorder(Arena* arena) {
  _handles  = NULL;
  _indexes  = NULL;
  _arena    = arena;
  _complete = false;
}

template <class T> template <class X>  ValueRecorder<T>::IndexCache<X>::IndexCache() {
  assert(first_index > 0, "initial zero state of cache must be invalid index");
  Copy::zero_to_bytes(&_cache[0], sizeof(_cache));
}

template <class T> int ValueRecorder<T>::size() {
  _complete = true;
  if (_handles == NULL)  return 0;
  return _handles->length() * sizeof(T);
}

template <class T> void ValueRecorder<T>::copy_values_to(nmethod* nm) {
  assert(_complete, "must be frozen");
  maybe_initialize();  // get non-null handles, even if we have no oops
  nm->copy_values(_handles);
}

template <class T> void ValueRecorder<T>::maybe_initialize() {
  if (_handles == NULL) {
    if (_arena != NULL) {
      _handles  = new(_arena) GrowableArray<T>(_arena, 10, 0, 0);
      _no_finds = new(_arena) GrowableArray<int>(_arena, 10, 0, 0);
    } else {
      _handles  = new GrowableArray<T>(10, 0, 0);
      _no_finds = new GrowableArray<int>(10, 0, 0);
    }
  }
}


template <class T> T ValueRecorder<T>::at(int index) {
  // there is always a NULL virtually present as first object
  if (index == null_index)  return NULL;
  return _handles->at(index - first_index);
}


template <class T> int ValueRecorder<T>::add_handle(T h, bool make_findable) {
  assert(!_complete, "cannot allocate more elements after size query");
  maybe_initialize();
  // indexing uses 1 as an origin--0 means null
  int index = _handles->length() + first_index;
  _handles->append(h);

  // Support correct operation of find_index().
  assert(!(make_findable && !is_real(h)), "nulls are not findable");
  if (make_findable) {
    // This index may be returned from find_index().
    if (_indexes != NULL) {
      int* cloc = _indexes->cache_location(h);
      _indexes->set_cache_location_index(cloc, index);
    } else if (index == index_cache_threshold && _arena != NULL) {
      _indexes = new(_arena) IndexCache<T>();
      for (int i = 0; i < _handles->length(); i++) {
        // Load the cache with pre-existing elements.
        int index0 = i + first_index;
        if (_no_finds->contains(index0))  continue;
        int* cloc = _indexes->cache_location(_handles->at(i));
        _indexes->set_cache_location_index(cloc, index0);
      }
    }
  } else if (is_real(h)) {
    // Remember that this index is not to be returned from find_index().
    // This case is rare, because most or all uses of allocate_index pass
    // an argument of NULL or Universe::non_oop_word.
    // Thus, the expected length of _no_finds is zero.
    _no_finds->append(index);
  }

  return index;
}


template <class T> int ValueRecorder<T>::maybe_find_index(T h) {
  debug_only(_find_index_calls++);
  assert(!_complete, "cannot allocate more elements after size query");
  maybe_initialize();
  if (h == NULL)  return null_index;
  assert(is_real(h), "must be valid");
  int* cloc = (_indexes == NULL)? NULL: _indexes->cache_location(h);
  if (cloc != NULL) {
    int cindex = _indexes->cache_location_index(cloc);
    if (cindex == 0) {
      return -1;   // We know this handle is completely new.
    }
    if (cindex >= first_index && _handles->at(cindex - first_index) == h) {
      debug_only(_hit_indexes++);
      return cindex;
    }
    if (!_indexes->cache_location_collision(cloc)) {
      return -1;   // We know the current cache occupant is unique to that cloc.
    }
  }

  // Not found in cache, due to a cache collision.  (Or, no cache at all.)
  // Do a linear search, most recent to oldest.
  for (int i = _handles->length() - 1; i >= 0; i--) {
    if (_handles->at(i) == h) {
      int findex = i + first_index;
      if (_no_finds->contains(findex))  continue;  // oops; skip this one
      if (cloc != NULL) {
        _indexes->set_cache_location_index(cloc, findex);
      }
      debug_only(_missed_indexes++);
      return findex;
    }
  }
  return -1;
}

// Explicitly instantiate these types
template class ValueRecorder<Metadata*>;
template class ValueRecorder<jobject>;
C:\hotspot-69087d08d473\src\share\vm/code/oopRecorder.hpp
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_OOPRECORDER_HPP
#define SHARE_VM_CODE_OOPRECORDER_HPP

#include "memory/universe.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"

// Recording and retrieval of either oop relocations or metadata in compiled code.

class CodeBlob;

template <class T> class ValueRecorder : public StackObj {
 public:
  // A two-way mapping from positive indexes to oop handles.
  // The zero index is reserved for a constant (sharable) null.
  // Indexes may not be negative.

  // Use the given arena to manage storage, if not NULL.
  // By default, uses the current ResourceArea.
  ValueRecorder(Arena* arena = NULL);

  // Generate a new index on which nmethod::oop_addr_at will work.
  // allocate_index and find_index never return the same index,
  // and allocate_index never returns the same index twice.
  // In fact, two successive calls to allocate_index return successive ints.
  int allocate_index(T h) {
    return add_handle(h, false);
  }

  // For a given jobject or Metadata*, this will return the same index
  // repeatedly. The index can later be given to nmethod::oop_at or
  // metadata_at to retrieve the oop.
  // However, the oop must not be changed via nmethod::oop_addr_at.
  int find_index(T h) {
    int index = maybe_find_index(h);
    if (index < 0) {  // previously unallocated
      index = add_handle(h, true);
    }
    return index;
  }
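
  // Illustrative contrast (a sketch, not real calling code): allocate_index
  // always returns a fresh slot, while find_index canonicalizes a handle:
  //
  //   int a = rec.allocate_index(h);  // new index; never returned by find_index
  //   int b = rec.find_index(h);      // allocates on first use...
  //   int c = rec.find_index(h);      // ...then b == c on every repeat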

  // returns the size of the generated oop/metadata table, for sizing the
  // CodeBlob. Must be called after all oops are allocated!
  int size();

  // Retrieve the value at a given index.
  T at(int index);

  int count() {
    if (_handles == NULL) return 0;
    // there is always a NULL virtually present as first object
    return _handles->length() + first_index;
  }

  // Helper function; returns false for NULL or Universe::non_oop_word().
  bool is_real(T h) {
    return h != NULL && h != (T)Universe::non_oop_word();
  }

  // copy the generated table to nmethod
  void copy_values_to(nmethod* nm);

  bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
  bool is_complete() { return _complete; }
#endif

 private:
  // variant of find_index which does not allocate if not found (yields -1)
  int maybe_find_index(T h);

  // leaky hash table of handle => index, to help detect duplicate insertion
  template <class X> class IndexCache : public ResourceObj {
    // This class is only used by the ValueRecorder class.
    friend class ValueRecorder;
    enum {
      _log_cache_size = 9,
      _cache_size = (1<<_log_cache_size),
      // Index entries are ints.  The LSBit is a collision indicator.
      _collision_bit_shift = 0,
      _collision_bit = 1,
      _index_shift = _collision_bit_shift+1
    };
    int _cache[_cache_size];
    static juint cache_index(X handle) {
      juint ci = (int) (intptr_t) handle;
      ci ^= ci >> (BitsPerByte*2);
      ci += ci >> (BitsPerByte*1);
      return ci & (_cache_size-1);
    }
    int* cache_location(X handle) {
      return &_cache[ cache_index(handle) ];
    }
    static bool cache_location_collision(int* cloc) {
      return ((*cloc) & _collision_bit) != 0;
    }
    static int cache_location_index(int* cloc) {
      return (*cloc) >> _index_shift;
    }
    static void set_cache_location_index(int* cloc, int index) {
      int cval0 = (*cloc);
      int cval1 = (index << _index_shift);
      if (cval0 != 0 && cval1 != cval0)  cval1 += _collision_bit;
      (*cloc) = cval1;
    }
    IndexCache();
  };
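
  // Worked example of the encoding above: storing index 5 into an empty
  // cache line writes (5 << _index_shift) == 10; later storing index 7
  // into the same line writes (7 << _index_shift) | _collision_bit == 15.
  // cache_location_index() then yields 7, and the set collision bit tells
  // maybe_find_index that a linear search is still required on a miss.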

  void maybe_initialize();
  int add_handle(T h, bool make_findable);

  enum { null_index = 0, first_index = 1, index_cache_threshold = 20 };

  GrowableArray<T>*         _handles;  // ordered list (first is always NULL)
  GrowableArray<int>*       _no_finds; // all unfindable indexes; usually empty
  IndexCache<T>*            _indexes;  // map: handle -> its probable index
  Arena*                    _arena;
  bool                      _complete;

#ifdef ASSERT
  static int _find_index_calls, _hit_indexes, _missed_indexes;
#endif
};

class OopRecorder : public ResourceObj {
 private:
  ValueRecorder<jobject>      _oops;
  ValueRecorder<Metadata*>    _metadata;
 public:
  OopRecorder(Arena* arena = NULL): _oops(arena), _metadata(arena) {}

  int allocate_oop_index(jobject h) {
    return _oops.allocate_index(h);
  }
  int find_index(jobject h) {
    return _oops.find_index(h);
  }
  jobject oop_at(int index) {
    return _oops.at(index);
  }
  int oop_size() {
    return _oops.size();
  }
  int oop_count() {
    return _oops.count();
  }
  bool is_real(jobject h) {
    return _oops.is_real(h);
  }

  int allocate_metadata_index(Metadata* oop) {
    return _metadata.allocate_index(oop);
  }
  int find_index(Metadata* h) {
    return _metadata.find_index(h);
  }
  Metadata* metadata_at(int index) {
    return _metadata.at(index);
  }
  int metadata_size() {
    return _metadata.size();
  }
  int metadata_count() {
    return _metadata.count();
  }
  bool is_real(Metadata* h) {
    return _metadata.is_real(h);
  }

  bool is_unused() {
    return _oops.is_unused() && _metadata.is_unused();
  }

  void freeze() {
    _oops.size();
    _metadata.size();
  }

  void copy_values_to(nmethod* nm) {
    if (!_oops.is_unused()) {
      _oops.copy_values_to(nm);
    }
    if (!_metadata.is_unused()) {
      _metadata.copy_values_to(nm);
    }
  }

#ifdef ASSERT
  bool is_complete() {
    assert(_oops.is_complete() == _metadata.is_complete(), "must agree");
    return _oops.is_complete();
  }
#endif
};
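
// Typical use during compilation (a sketch under assumed names; the real
// clients are the compilers and the debug-info machinery):
//
//   OopRecorder rec(arena);
//   int oi = rec.find_index(some_jobject);    // index embedded in a reloc
//   int mi = rec.find_index(some_metadata);   // ditto for metadata
//   rec.freeze();                             // no further additions
//   // size the CodeBlob with rec.oop_size() / rec.metadata_size(), then:
//   rec.copy_values_to(nm);                   // fill the nmethod's tables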


#endif // SHARE_VM_CODE_OOPRECORDER_HPP
C:\hotspot-69087d08d473\src\share\vm/code/pcDesc.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "memory/resourceArea.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
  _pc_offset           = pc_offset;
  _scope_decode_offset = scope_decode_offset;
  _obj_decode_offset   = obj_decode_offset;
  _flags               = 0;
}

address PcDesc::real_pc(const nmethod* code) const {
  return code->code_begin() + pc_offset();
}

void PcDesc::print(nmethod* code) {
#ifndef PRODUCT
  ResourceMark rm;
  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags);

  if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
    return;
  }

  for (ScopeDesc* sd = code->scope_desc_at(real_pc(code));
       sd != NULL;
       sd = sd->sender()) {
    tty->print("  ");
    sd->method()->print_short_name(tty);
    tty->print("  @%d", sd->bci());
    if (sd->should_reexecute())
      tty->print("  reexecute=true");
    tty->cr();
  }
#endif
}

bool PcDesc::verify(nmethod* code) {
  //Unimplemented();
  return true;
}
C:\hotspot-69087d08d473\src\share\vm/code/pcDesc.hpp
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_PCDESC_HPP
#define SHARE_VM_CODE_PCDESC_HPP

#include "memory/allocation.hpp"

// PcDescs map a physical PC (given as offset from start of nmethod) to
// the corresponding source scope and byte code index.

class nmethod;

class PcDesc VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  int _pc_offset;           // offset from start of nmethod
  int _scope_decode_offset; // offset for scope in nmethod
  int _obj_decode_offset;

  enum {
    PCDESC_reexecute               = 1 << 0,
    PCDESC_is_method_handle_invoke = 1 << 1,
    PCDESC_return_oop              = 1 << 2
  };

  int _flags;

  void set_flag(int mask, bool z) {
    _flags = z ? (_flags | mask) : (_flags & ~mask);
  }

 public:
  int pc_offset() const           { return _pc_offset;   }
  int scope_decode_offset() const { return _scope_decode_offset; }
  int obj_decode_offset() const   { return _obj_decode_offset; }

  void set_pc_offset(int x)           { _pc_offset           = x; }
  void set_scope_decode_offset(int x) { _scope_decode_offset = x; }
  void set_obj_decode_offset(int x)   { _obj_decode_offset   = x; }

  // Constructor (only used for static in nmethod.cpp)
  // Also used by ScopeDesc::sender()
  PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset);

  enum {
    // upper and lower exclusive limits for real offsets:
    lower_offset_limit = -1,
    upper_offset_limit = (unsigned int)-1 >> 1
  };

  // Flags
  bool     should_reexecute()              const { return (_flags & PCDESC_reexecute) != 0; }
  void set_should_reexecute(bool z)              { set_flag(PCDESC_reexecute, z); }

  // Does this PcDesc refer to the same information as pd?
  bool is_same_info(const PcDesc* pd) {
    return _scope_decode_offset == pd->_scope_decode_offset &&
      _obj_decode_offset == pd->_obj_decode_offset &&
      _flags == pd->_flags;
  }

  bool     is_method_handle_invoke()       const { return (_flags & PCDESC_is_method_handle_invoke) != 0;     }
  void set_is_method_handle_invoke(bool z)       { set_flag(PCDESC_is_method_handle_invoke, z); }

  bool     return_oop()                    const { return (_flags & PCDESC_return_oop) != 0;     }
  void set_return_oop(bool z)                    { set_flag(PCDESC_return_oop, z); }

  // Returns the real pc
  address real_pc(const nmethod* code) const;

  void print(nmethod* code);
  bool verify(nmethod* code);
};

#endif // SHARE_VM_CODE_PCDESC_HPP
C:\hotspot-69087d08d473\src\share\vm/code/relocInfo.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/copy.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

const RelocationHolder RelocationHolder::none; // its type is relocInfo::none


// Implementation of relocInfo

#ifdef ASSERT
relocInfo::relocInfo(relocType t, int off, int f) {
  assert(t != data_prefix_tag, "cannot build a prefix this way");
  assert((t & type_mask) == t, "wrong type");
  assert((f & format_mask) == f, "wrong format");
  assert(off >= 0 && off < offset_limit(), "offset out of bounds");
  assert((off & (offset_unit-1)) == 0, "misaligned offset");
  (*this) = relocInfo(t, RAW_BITS, off, f);
}
#endif

void relocInfo::initialize(CodeSection* dest, Relocation* reloc) {
  relocInfo* data = this+1;  // here's where the data might go
  dest->set_locs_end(data);  // sync end: the next call may read dest.locs_end
  reloc->pack_data_to(dest); // maybe write data into locs, advancing locs_end
  relocInfo* data_limit = dest->locs_end();
  if (data_limit > data) {
    relocInfo suffix = (*this);
    data_limit = this->finish_prefix((short*) data_limit);
    // Finish up with the suffix.  (Hack note: pack_data_to might edit this.)
    *data_limit = suffix;
    dest->set_locs_end(data_limit+1);
  }
}

relocInfo* relocInfo::finish_prefix(short* prefix_limit) {
  assert(sizeof(relocInfo) == sizeof(short), "change this code");
  short* p = (short*)(this+1);
  assert(prefix_limit >= p, "must be a valid span of data");
  int plen = prefix_limit - p;
  if (plen == 0) {
    debug_only(_value = 0xFFFF);
    return this;                         // no data: remove self completely
  }
  if (plen == 1 && fits_into_immediate(p[0])) {
    (*this) = immediate_relocInfo(p[0]); // move data inside self
    return this+1;
  }
  // cannot compact, so just update the count and return the limit pointer
  (*this) = prefix_relocInfo(plen);   // write new datalen
  assert(data() + datalen() == prefix_limit, "pointers must line up");
  return (relocInfo*)prefix_limit;
}
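
// Illustrative sketch of the three finish_prefix outcomes (values are
// hypothetical):
//   plen == 0                 -> the header is erased; no prefix remains.
//   plen == 1 and data fits   -> e.g. a single halfword 0x0005 is folded into
//                                the header as an "immediate" prefix.
//   otherwise                 -> the header becomes prefix_relocInfo(plen) and
//                                the plen data halfwords follow it unchanged.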


void relocInfo::set_type(relocType t) {
  int old_offset = addr_offset();
  int old_format = format();
  (*this) = relocInfo(t, old_offset, old_format);
  assert(type()==(int)t, "sanity check");
  assert(addr_offset()==old_offset, "sanity check");
  assert(format()==old_format, "sanity check");
}


void relocInfo::set_format(int f) {
  int old_offset = addr_offset();
  assert((f & format_mask) == f, "wrong format");
  _value = (_value & ~(format_mask << offset_width)) | (f << offset_width);
  assert(addr_offset()==old_offset, "sanity check");
}


void relocInfo::change_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type, relocType new_type) {
  bool found = false;
  while (itr->next() && !found) {
    if (itr->addr() == pc) {
      assert(itr->type()==old_type, "wrong relocInfo type found");
      itr->current()->set_type(new_type);
      found=true;
    }
  }
  assert(found, "no relocInfo found for pc");
}


void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type) {
  change_reloc_info_for_address(itr, pc, old_type, none);
}
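
// Example usage (a sketch; nm and pc are assumed to be a valid nmethod and
// the address of a previously relocated instruction):
//   RelocIterator iter(nm);
//   relocInfo::remove_reloc_info_for_address(&iter, pc,
//                                            relocInfo::virtual_call_type);
// The call consumes the iterator and asserts that a relocation of the old
// type was actually present at pc.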


// ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator

void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
  initialize_misc();

  if (nm == NULL && begin != NULL) {
    // allow nmethod to be deduced from beginning address
    CodeBlob* cb = CodeCache::find_blob(begin);
    nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
  }
  guarantee(nm != NULL, "must be able to deduce nmethod from other arguments");

  _code    = nm;
  _current = nm->relocation_begin() - 1;
  _end     = nm->relocation_end();
  _addr    = nm->content_begin();

  // Initialize code sections.
  _section_start[CodeBuffer::SECT_CONSTS] = nm->consts_begin();
  _section_start[CodeBuffer::SECT_INSTS ] = nm->insts_begin() ;
  _section_start[CodeBuffer::SECT_STUBS ] = nm->stub_begin()  ;

  _section_end  [CodeBuffer::SECT_CONSTS] = nm->consts_end()  ;
  _section_end  [CodeBuffer::SECT_INSTS ] = nm->insts_end()   ;
  _section_end  [CodeBuffer::SECT_STUBS ] = nm->stub_end()    ;

  assert(!has_current(), "just checking");
  assert(begin == NULL || begin >= nm->code_begin(), "in bounds");
  assert(limit == NULL || limit <= nm->code_end(),   "in bounds");
  set_limits(begin, limit);
}


RelocIterator::RelocIterator(CodeSection* cs, address begin, address limit) {
  initialize_misc();

  _current = cs->locs_start()-1;
  _end     = cs->locs_end();
  _addr    = cs->start();
  _code    = NULL; // Not cb->blob();

  CodeBuffer* cb = cs->outer();
  assert((int) SECT_LIMIT == CodeBuffer::SECT_LIMIT, "my copy must be equal");
  for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
    CodeSection* cs = cb->code_section(n);
    _section_start[n] = cs->start();
    _section_end  [n] = cs->end();
  }

  assert(!has_current(), "just checking");

  assert(begin == NULL || begin >= cs->start(), "in bounds");
  assert(limit == NULL || limit <= cs->end(),   "in bounds");
  set_limits(begin, limit);
}


enum { indexCardSize = 128 };
struct RelocIndexEntry {
  jint addr_offset;          // offset from header_end of an addr()
  jint reloc_offset;         // offset from header_end of a relocInfo (prefix)
};


bool RelocIterator::addr_in_const() const {
  const int n = CodeBuffer::SECT_CONSTS;
  return section_start(n) <= addr() && addr() < section_end(n);
}


static inline int num_cards(int code_size) {
  return (code_size-1) / indexCardSize;
}


int RelocIterator::locs_and_index_size(int code_size, int locs_size) {
  if (!UseRelocIndex)  return locs_size;   // no index
  code_size = round_to(code_size, oopSize);
  locs_size = round_to(locs_size, oopSize);
  int index_size = num_cards(code_size) * sizeof(RelocIndexEntry);
  // format of indexed relocs:
  //   relocation_begin:   relocInfo ...
  //   index:              (addr,reloc#) ...
  //                       indexSize           :relocation_end
  return locs_size + index_size + BytesPerInt;
}
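
// Worked example (illustrative; assumes 4-byte jints, so a RelocIndexEntry
// occupies 8 bytes): for code_size = 4096 and locs_size = 256,
// num_cards(4096) = (4096-1)/128 = 31, index_size = 31*8 = 248, and the
// result is 256 + 248 + BytesPerInt = 508 bytes.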


void RelocIterator::create_index(relocInfo* dest_begin, int dest_count, relocInfo* dest_end) {
  address relocation_begin = (address)dest_begin;
  address relocation_end   = (address)dest_end;
  int     total_size       = relocation_end - relocation_begin;
  int     locs_size        = dest_count * sizeof(relocInfo);
  if (!UseRelocIndex) {
    Copy::fill_to_bytes(relocation_begin + locs_size, total_size-locs_size, 0);
    return;
  }
  int     index_size       = total_size - locs_size - BytesPerInt;      // find out how much space is left
  int     ncards           = index_size / sizeof(RelocIndexEntry);
  assert(total_size == locs_size + index_size + BytesPerInt, "checkin'");
  assert(index_size >= 0 && index_size % sizeof(RelocIndexEntry) == 0, "checkin'");
  jint*   index_size_addr  = (jint*)relocation_end - 1;

  assert(sizeof(jint) == BytesPerInt, "change this code");

  *index_size_addr = index_size;
  if (index_size != 0) {
    assert(index_size > 0, "checkin'");

    RelocIndexEntry* index = (RelocIndexEntry *)(relocation_begin + locs_size);
    assert(index == (RelocIndexEntry*)index_size_addr - ncards, "checkin'");

    // walk over the relocations, and fill in index entries as we go
    RelocIterator iter;
    const address    initial_addr    = NULL;
    relocInfo* const initial_current = dest_begin - 1;  // biased by -1 like elsewhere

    iter._code    = NULL;
    iter._addr    = initial_addr;
    iter._limit   = (address)(intptr_t)(ncards * indexCardSize);
    iter._current = initial_current;
    iter._end     = dest_begin + dest_count;

    int i = 0;
    address next_card_addr = (address)indexCardSize;
    int addr_offset = 0;
    int reloc_offset = 0;
    while (true) {
      // Checkpoint the iterator before advancing it.
      addr_offset  = iter._addr    - initial_addr;
      reloc_offset = iter._current - initial_current;
      if (!iter.next())  break;
      while (iter.addr() >= next_card_addr) {
        index[i].addr_offset  = addr_offset;
        index[i].reloc_offset = reloc_offset;
        i++;
        next_card_addr += indexCardSize;
      }
    }
    while (i < ncards) {
      index[i].addr_offset  = addr_offset;
      index[i].reloc_offset = reloc_offset;
      i++;
    }
  }
}
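
// Illustrative sketch: with indexCardSize = 128, index entry i records the
// iterator state (addr_offset, reloc_offset) saved just before the first
// relocation whose address reaches card boundary (i+1)*128.  set_limits()
// can then resume from that saved state instead of replaying the whole
// relocation stream from the beginning.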


void RelocIterator::set_limits(address begin, address limit) {
  int index_size = 0;
  if (UseRelocIndex && _code != NULL) {
    index_size = ((jint*)_end)[-1];
    _end = (relocInfo*)( (address)_end - index_size - BytesPerInt );
  }

  _limit = limit;

  // the limit affects this next stuff:
  if (begin != NULL) {
#ifdef ASSERT
    // In ASSERT mode we do not actually use the index, but simply
    // check that its contents would have led us to the right answer.
    address addrCheck = _addr;
    relocInfo* infoCheck = _current;
#endif // ASSERT
    if (index_size > 0) {
      // skip ahead
      RelocIndexEntry* index       = (RelocIndexEntry*)_end;
      RelocIndexEntry* index_limit = (RelocIndexEntry*)((address)index + index_size);
      assert(_addr == _code->code_begin(), "_addr must be unadjusted");
      int card = (begin - _addr) / indexCardSize;
      if (card > 0) {
        if (index+card-1 < index_limit)  index += card-1;
        else                             index = index_limit - 1;
#ifdef ASSERT
        addrCheck = _addr    + index->addr_offset;
        infoCheck = _current + index->reloc_offset;
#else
        // Advance the iterator immediately to the last valid state
        // for the previous card.  Calling "next" will then advance
        // it to the first item on the required card.
        _addr    += index->addr_offset;
        _current += index->reloc_offset;
#endif // ASSERT
      }
    }

    relocInfo* backup;
    address    backup_addr;
    while (true) {
      backup      = _current;
      backup_addr = _addr;
#ifdef ASSERT
      if (backup == infoCheck) {
        assert(backup_addr == addrCheck, "must match"); addrCheck = NULL; infoCheck = NULL;
      } else {
        assert(addrCheck == NULL || backup_addr <= addrCheck, "must not pass addrCheck");
      }
#endif // ASSERT
      if (!next() || addr() >= begin) break;
    }
    assert(addrCheck == NULL || addrCheck == backup_addr, "must have matched addrCheck");
    assert(infoCheck == NULL || infoCheck == backup,      "must have matched infoCheck");
    // At this point, either we are at the first matching record,
    // or else there is no such record, and !has_current().
    // In either case, revert to the immediately preceding state.
    _current = backup;
    _addr    = backup_addr;
    set_has_current(false);
  }
}


void RelocIterator::set_limit(address limit) {
  address code_end = (address)code() + code()->size();
  assert(limit == NULL || limit <= code_end, "in bounds");
  _limit = limit;
}

// All the strange bit-encodings are in here.
// The idea is to encode relocation data which are small integers
// very efficiently (a single extra halfword).  Larger chunks of
// relocation data need a halfword header to hold their size.
void RelocIterator::advance_over_prefix() {
  if (_current->is_datalen()) {
    _data    = (short*) _current->data();
    _datalen =          _current->datalen();
    _current += _datalen + 1;   // skip the embedded data & header
  } else {
    _databuf = _current->immediate();
    _data = &_databuf;
    _datalen = 1;
    _current++;                 // skip the header
  }
  // The client will see the following relocInfo, whatever that is.
  // It is the reloc to which the preceding data applies.
}
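
// Sketch of the two prefix layouts decoded above (layout illustrative):
//   datalen form:   [header | datalen_tag | n] [d0] [d1] ... [d(n-1)]
//                   -> _data points at d0 and _datalen is n
//   immediate form: [header | small value stored in the header itself]
//                   -> the value is copied into _databuf and _datalen is 1
// Either way, _current ends on the relocInfo to which the data applies.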


void RelocIterator::initialize_misc() {
  set_has_current(false);
  for (int i = (int) CodeBuffer::SECT_FIRST; i < (int) CodeBuffer::SECT_LIMIT; i++) {
    _section_start[i] = NULL;  // these will be lazily computed, if needed
    _section_end  [i] = NULL;
  }
}


Relocation* RelocIterator::reloc() {
  // (take the "switch" out-of-line)
  relocInfo::relocType t = type();
  if (false) {}
  #define EACH_TYPE(name)                             \
  else if (t == relocInfo::name##_type) {             \
    return name##_reloc();                            \
  }
  APPLY_TO_RELOCATIONS(EACH_TYPE);
  #undef EACH_TYPE
  assert(t == relocInfo::none, "must be padding");
  return new(_rh) Relocation();
}


// ----------------------------------------------------------------------------------------------------
// Methods for flyweight Relocation types


RelocationHolder RelocationHolder::plus(int offset) const {
  if (offset != 0) {
    switch (type()) {
    case relocInfo::none:
      break;
    case relocInfo::oop_type:
      {
        oop_Relocation* r = (oop_Relocation*)reloc();
        return oop_Relocation::spec(r->oop_index(), r->offset() + offset);
      }
    case relocInfo::metadata_type:
      {
        metadata_Relocation* r = (metadata_Relocation*)reloc();
        return metadata_Relocation::spec(r->metadata_index(), r->offset() + offset);
      }
    default:
      ShouldNotReachHere();
    }
  }
  return (*this);
}
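
// Example usage (a sketch; oop_index and disp are hypothetical values):
//   RelocationHolder rh = oop_Relocation::spec(oop_index);
//   rh = rh.plus(disp);  // same oop, now with a byte offset of disp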


void Relocation::guarantee_size() {
  guarantee(false, "Make _relocbuf bigger!");
}

// Some relocations can compute their own values.
address Relocation::value() {
  ShouldNotReachHere();
  return NULL;
}


void Relocation::set_value(address x) {
  ShouldNotReachHere();
}


RelocationHolder Relocation::spec_simple(relocInfo::relocType rtype) {
  if (rtype == relocInfo::none)  return RelocationHolder::none;
  relocInfo ri = relocInfo(rtype, 0);
  RelocIterator itr;
  itr.set_current(ri);
  itr.reloc();
  return itr._rh;
}

int32_t Relocation::runtime_address_to_index(address runtime_address) {
  assert(!is_reloc_index((intptr_t)runtime_address), "must not look like an index");

  if (runtime_address == NULL)  return 0;

  StubCodeDesc* p = StubCodeDesc::desc_for(runtime_address);
  if (p != NULL && p->begin() == runtime_address) {
    assert(is_reloc_index(p->index()), "there must not be too many stubs");
    return (int32_t)p->index();
  } else {
    // Known "miscellaneous" non-stub pointers:
    // os::get_polling_page(), SafepointSynchronize::address_of_state()
    if (PrintRelocations) {
      tty->print_cr("random unregistered address in relocInfo: " INTPTR_FORMAT, runtime_address);
    }
#ifndef _LP64
    return (int32_t) (intptr_t)runtime_address;
#else
    // didn't fit return non-index
    return -1;
#endif /* _LP64 */
  }
}


address Relocation::index_to_runtime_address(int32_t index) {
  if (index == 0)  return NULL;

  if (is_reloc_index(index)) {
    StubCodeDesc* p = StubCodeDesc::desc_for_index(index);
    assert(p != NULL, "there must be a stub for this index");
    return p->begin();
  } else {
#ifndef _LP64
    // this only works on 32bit machines
    return (address) ((intptr_t) index);
#else
    fatal("Relocation::index_to_runtime_address, int32_t not pointer sized");
    return NULL;
#endif /* _LP64 */
  }
}
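
// The two routines above are inverses for registered stubs.  A sketch of the
// round trip (stub_entry is a hypothetical stub-begin address):
//   int32_t ix = runtime_address_to_index(stub_entry);
//   assert(index_to_runtime_address(ix) == stub_entry, "must invert");
// On 32-bit machines an unregistered address round-trips as its raw 32-bit
// value; on 64-bit machines it cannot be encoded as an index at all.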

address Relocation::old_addr_for(address newa,
                                 const CodeBuffer* src, CodeBuffer* dest) {
  int sect = dest->section_index_of(newa);
  guarantee(sect != CodeBuffer::SECT_NONE, "lost track of this address");
  address ostart = src->code_section(sect)->start();
  address nstart = dest->code_section(sect)->start();
  return ostart + (newa - nstart);
}

address Relocation::new_addr_for(address olda,
                                 const CodeBuffer* src, CodeBuffer* dest) {
  debug_only(const CodeBuffer* src0 = src);
  int sect = CodeBuffer::SECT_NONE;
  // Look for olda in the source buffer, and all previous incarnations
  // if the source buffer has been expanded.
  for (; src != NULL; src = src->before_expand()) {
    sect = src->section_index_of(olda);
    if (sect != CodeBuffer::SECT_NONE)  break;
  }
  guarantee(sect != CodeBuffer::SECT_NONE, "lost track of this address");
  address ostart = src->code_section(sect)->start();
  address nstart = dest->code_section(sect)->start();
  return nstart + (olda - ostart);
}
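
// Illustrative sketch: if a section moved from ostart = 0x1000 to
// nstart = 0x2000 during buffer expansion, then an old address 0x1040 maps
// to new_addr_for(...) == 0x2040, and old_addr_for() inverts the mapping.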

void Relocation::normalize_address(address& addr, const CodeSection* dest, bool allow_other_sections) {
  address addr0 = addr;
  if (addr0 == NULL || dest->allocates2(addr0))  return;
  CodeBuffer* cb = dest->outer();
  addr = new_addr_for(addr0, cb, cb);
  assert(allow_other_sections || dest->contains2(addr),
         "addr must be in required section");
}


void CallRelocation::set_destination(address x) {
  pd_set_call_destination(x);
}

void CallRelocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
  // Usually a self-relative reference to an external routine.
  // On some platforms, the reference is absolute (not self-relative).
  // The enhanced use of pd_call_destination sorts this all out.
  address orig_addr = old_addr_for(addr(), src, dest);
  address callee    = pd_call_destination(orig_addr);
  // Reassert the callee address, this time in the new copy of the code.
  pd_set_call_destination(callee);
}


// ----------------------------------------------------------------------------------------------------
// pack/unpack methods

void oop_Relocation::pack_data_to(CodeSection* dest) {
  short* p = (short*) dest->locs_end();
  p = pack_2_ints_to(p, _oop_index, _offset);
  dest->set_locs_end((relocInfo*) p);
}


void oop_Relocation::unpack_data() {
  unpack_2_ints(_oop_index, _offset);
}

void metadata_Relocation::pack_data_to(CodeSection* dest) {
  short* p = (short*) dest->locs_end();
  p = pack_2_ints_to(p, _metadata_index, _offset);
  dest->set_locs_end((relocInfo*) p);
}


void metadata_Relocation::unpack_data() {
  unpack_2_ints(_metadata_index, _offset);
}


void virtual_call_Relocation::pack_data_to(CodeSection* dest) {
  short*  p     = (short*) dest->locs_end();
  address point =          dest->locs_point();

  normalize_address(_cached_value, dest);
  jint x0 = scaled_offset_null_special(_cached_value, point);
  p = pack_1_int_to(p, x0);
  dest->set_locs_end((relocInfo*) p);
}


void virtual_call_Relocation::unpack_data() {
  jint x0 = unpack_1_int();
  address point = addr();
  _cached_value = x0==0? NULL: address_from_scaled_offset(x0, point);
}


void static_stub_Relocation::pack_data_to(CodeSection* dest) {
  short* p = (short*) dest->locs_end();
  CodeSection* insts = dest->outer()->insts();
  normalize_address(_static_call, insts);
  p = pack_1_int_to(p, scaled_offset(_static_call, insts->start()));
  dest->set_locs_end((relocInfo*) p);
}

void static_stub_Relocation::unpack_data() {
  address base = binding()->section_start(CodeBuffer::SECT_INSTS);
  _static_call = address_from_scaled_offset(unpack_1_int(), base);
}

void trampoline_stub_Relocation::pack_data_to(CodeSection* dest ) {
  short* p = (short*) dest->locs_end();
  CodeSection* insts = dest->outer()->insts();
  normalize_address(_owner, insts);
  p = pack_1_int_to(p, scaled_offset(_owner, insts->start()));
  dest->set_locs_end((relocInfo*) p);
}

void trampoline_stub_Relocation::unpack_data() {
  address base = binding()->section_start(CodeBuffer::SECT_INSTS);
  _owner = address_from_scaled_offset(unpack_1_int(), base);
}

void external_word_Relocation::pack_data_to(CodeSection* dest) {
  short* p = (short*) dest->locs_end();
  int32_t index = runtime_address_to_index(_target);
#ifndef _LP64
  p = pack_1_int_to(p, index);
#else
  if (is_reloc_index(index)) {
    p = pack_2_ints_to(p, index, 0);
  } else {
    jlong t = (jlong) _target;
    int32_t lo = low(t);
    int32_t hi = high(t);
    p = pack_2_ints_to(p, lo, hi);
    DEBUG_ONLY(jlong t1 = jlong_from(hi, lo));
    assert(!is_reloc_index(t1) && (address) t1 == _target, "not symmetric");
  }
#endif /* _LP64 */
  dest->set_locs_end((relocInfo*) p);
}


void external_word_Relocation::unpack_data() {
#ifndef _LP64
  _target = index_to_runtime_address(unpack_1_int());
#else
  int32_t lo, hi;
  unpack_2_ints(lo, hi);
  jlong t = jlong_from(hi, lo);
  if (is_reloc_index(t)) {
    _target = index_to_runtime_address(t);
  } else {
    _target = (address) t;
  }
#endif /* _LP64 */
}


void internal_word_Relocation::pack_data_to(CodeSection* dest) {
  short* p = (short*) dest->locs_end();
  normalize_address(_target, dest, true);

  // Check whether my target address is valid within this section.
  // If not, strengthen the relocation type to point to another section.
  int sindex = _section;
  if (sindex == CodeBuffer::SECT_NONE && _target != NULL
      && (!dest->allocates(_target) || _target == dest->locs_point())) {
    sindex = dest->outer()->section_index_of(_target);
    guarantee(sindex != CodeBuffer::SECT_NONE, "must belong somewhere");
    relocInfo* base = dest->locs_end() - 1;
    assert(base->type() == this->type(), "sanity");
    // Change the written type, to be section_word_type instead.
    base->set_type(relocInfo::section_word_type);
  }

  // Note: An internal_word relocation cannot refer to its own instruction,
  // because we reserve "0" to mean that the pointer itself is embedded
  // in the code stream.  We use a section_word relocation for such cases.

  if (sindex == CodeBuffer::SECT_NONE) {
    assert(type() == relocInfo::internal_word_type, "must be base class");
    guarantee(_target == NULL || dest->allocates2(_target), "must be within the given code section");
    jint x0 = scaled_offset_null_special(_target, dest->locs_point());
    assert(!(x0 == 0 && _target != NULL), "correct encoding of null target");
    p = pack_1_int_to(p, x0);
  } else {
    assert(_target != NULL, "sanity");
    CodeSection* sect = dest->outer()->code_section(sindex);
    guarantee(sect->allocates2(_target), "must be in correct section");
    address base = sect->start();
    jint offset = scaled_offset(_target, base);
    assert((uint)sindex < (uint)CodeBuffer::SECT_LIMIT, "sanity");
    assert(CodeBuffer::SECT_LIMIT <= (1 << section_width), "section_width++");
    p = pack_1_int_to(p, (offset << section_width) | sindex);
  }

  dest->set_locs_end((relocInfo*) p);
}


void internal_word_Relocation::unpack_data() {
  jint x0 = unpack_1_int();
  _target = x0==0? NULL: address_from_scaled_offset(x0, addr());
  _section = CodeBuffer::SECT_NONE;
}


void section_word_Relocation::unpack_data() {
  jint    x      = unpack_1_int();
  jint    offset = (x >> section_width);
  int     sindex = (x & ((1<<section_width)-1));
  address base   = binding()->section_start(sindex);

  _section = sindex;
  _target  = address_from_scaled_offset(offset, base);
}

// ----------------------------------------------------------------------------------------------------
// miscellaneous methods

oop* oop_Relocation::oop_addr() {
  int n = _oop_index;
  if (n == 0) {
    // oop is stored in the code stream
    return (oop*) pd_address_in_code();
  } else {
    // oop is stored in table at nmethod::oops_begin
    return code()->oop_addr_at(n);
  }
}


oop oop_Relocation::oop_value() {
  oop v = *oop_addr();
  // clean inline caches store a special pseudo-null
  if (v == (oop)Universe::non_oop_word())  v = NULL;
  return v;
}


void oop_Relocation::fix_oop_relocation() {
  if (!oop_is_immediate()) {
    // get the oop from the pool, and re-insert it into the instruction:
    set_value(value());
  }
}


void oop_Relocation::verify_oop_relocation() {
  if (!oop_is_immediate()) {
    // get the oop from the pool, and re-insert it into the instruction:
    verify_value(value());
  }
}

// meta data versions
Metadata** metadata_Relocation::metadata_addr() {
  int n = _metadata_index;
  if (n == 0) {
    // metadata is stored in the code stream
    return (Metadata**) pd_address_in_code();
  } else {
    // metadata is stored in table at nmethod::metadatas_begin
    return code()->metadata_addr_at(n);
  }
}


Metadata* metadata_Relocation::metadata_value() {
  Metadata* v = *metadata_addr();
  // clean inline caches store a special pseudo-null
  if (v == (Metadata*)Universe::non_oop_word())  v = NULL;
  return v;
}


void metadata_Relocation::fix_metadata_relocation() {
  if (!metadata_is_immediate()) {
    // get the metadata from the pool, and re-insert it into the instruction:
    pd_fix_value(value());
  }
}


void metadata_Relocation::verify_metadata_relocation() {
  if (!metadata_is_immediate()) {
    // get the metadata from the pool, and re-insert it into the instruction:
    verify_value(value());
  }
}

address virtual_call_Relocation::cached_value() {
  assert(_cached_value != NULL && _cached_value < addr(), "must precede ic_call");
  return _cached_value;
}


void virtual_call_Relocation::clear_inline_cache() {
  // No stubs for ICs
  // Clean IC
  ResourceMark rm;
  CompiledIC* icache = CompiledIC_at(this);
  icache->set_to_clean();
}


void opt_virtual_call_Relocation::clear_inline_cache() {
  // No stubs for ICs
  // Clean IC
  ResourceMark rm;
  CompiledIC* icache = CompiledIC_at(this);
  icache->set_to_clean();
}


address opt_virtual_call_Relocation::static_stub() {
  // search for the static stub that points back to this static call
  address static_call_addr = addr();
  RelocIterator iter(code());
  while (iter.next()) {
    if (iter.type() == relocInfo::static_stub_type) {
      if (iter.static_stub_reloc()->static_call() == static_call_addr) {
        return iter.addr();
      }
    }
  }
  return NULL;
}


void static_call_Relocation::clear_inline_cache() {
  // Safe call site info
  CompiledStaticCall* handler = compiledStaticCall_at(this);
  handler->set_to_clean();
}


address static_call_Relocation::static_stub() {
  // search for the static stub that points back to this static call
  address static_call_addr = addr();
  RelocIterator iter(code());
  while (iter.next()) {
    if (iter.type() == relocInfo::static_stub_type) {
      if (iter.static_stub_reloc()->static_call() == static_call_addr) {
        return iter.addr();
      }
    }
  }
  return NULL;
}

// Finds the trampoline address for a call.  If no trampoline stub is
// found, NULL is returned, which can be handled by the caller.
address trampoline_stub_Relocation::get_trampoline_for(address call, nmethod* code) {
  // There are no relocations available when the code gets relocated
  // because of CodeBuffer expansion.
  if (code->relocation_size() == 0)
    return NULL;

  RelocIterator iter(code, call);
  while (iter.next()) {
    if (iter.type() == relocInfo::trampoline_stub_type) {
      if (iter.trampoline_stub_reloc()->owner() == call) {
        return iter.addr();
      }
    }
  }

  return NULL;
}
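
// Example usage (a sketch; call_site is a hypothetical far-call address):
//   address tramp = trampoline_stub_Relocation::get_trampoline_for(call_site, nm);
//   if (tramp == NULL) {
//     // no trampoline stub was emitted for this call; caller must cope
//   }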

void static_stub_Relocation::clear_inline_cache() {
  // Call stub is only used when calling the interpreted code.
  // It does not really need to be cleared, except that we want to clean out the method oop.
  CompiledStaticCall::set_stub_to_clean(this);
}


void external_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
  address target = _target;
  if (target == NULL) {
    // An absolute embedded reference to an external location,
    // which means there is nothing to fix here.
    return;
  }
  // Probably this reference is absolute, not relative, so the
  // following is probably a no-op.
  assert(src->section_index_of(target) == CodeBuffer::SECT_NONE, "sanity");
  set_value(target);
}


address external_word_Relocation::target() {
  address target = _target;
  if (target == NULL) {
    target = pd_get_address_from_code();
  }
  return target;
}


void internal_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
  address target = _target;
  if (target == NULL) {
    target = new_addr_for(this->target(), src, dest);
  }
  set_value(target);
}


address internal_word_Relocation::target() {
  address target = _target;
  if (target == NULL) {
    if (addr_in_const()) {
      target = *(address*)addr();
    } else {
      target = pd_get_address_from_code();
    }
  }
  return target;
}

//---------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

static const char* reloc_type_string(relocInfo::relocType t) {
  switch (t) {
  #define EACH_CASE(name) \
  case relocInfo::name##_type: \
    return #name;

  APPLY_TO_RELOCATIONS(EACH_CASE);
  #undef EACH_CASE

  case relocInfo::none:
    return "none";
  case relocInfo::data_prefix_tag:
    return "prefix";
  default:
    return "UNKNOWN RELOC TYPE";
  }
}


void RelocIterator::print_current() {
  if (!has_current()) {
    tty->print_cr("(no relocs)");
    return;
  }
  tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT " offset=%d",
             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr, _current->addr_offset());
  if (current()->format() != 0)
    tty->print(" format=%d", current()->format());
  if (datalen() == 1) {
    tty->print(" data=%d", data()[0]);
  } else if (datalen() > 0) {
    tty->print(" data={");
    for (int i = 0; i < datalen(); i++) {
      tty->print("%04x", data()[i] & 0xFFFF);
    }
    tty->print("}");
  }
  tty->print("]");
  switch (type()) {
  case relocInfo::oop_type:
    {
      oop_Relocation* r = oop_reloc();
      oop* oop_addr  = NULL;
      oop  raw_oop   = NULL;
      oop  oop_value = NULL;
      if (code() != NULL || r->oop_is_immediate()) {
        oop_addr  = r->oop_addr();
        raw_oop   = *oop_addr;
        oop_value = r->oop_value();
      }
      tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
                 oop_addr, (address)raw_oop, r->offset());
      // Do not print the oop by default--we want this routine to
      // work even during GC or other inconvenient times.
      if (WizardMode && oop_value != NULL) {
        tty->print("oop_value=" INTPTR_FORMAT ": ", (address)oop_value);
        oop_value->print_value_on(tty);
      }
      break;
    }
  case relocInfo::metadata_type:
    {
      metadata_Relocation* r = metadata_reloc();
      Metadata** metadata_addr  = NULL;
      Metadata*  raw_metadata   = NULL;
      Metadata*  metadata_value = NULL;
      if (code() != NULL || r->metadata_is_immediate()) {
        metadata_addr  = r->metadata_addr();
        raw_metadata   = *metadata_addr;
        metadata_value = r->metadata_value();
      }
      tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
                 metadata_addr, (address)raw_metadata, r->offset());
      if (metadata_value != NULL) {
        tty->print("metadata_value=" INTPTR_FORMAT ": ", (address)metadata_value);
        metadata_value->print_value_on(tty);
      }
      break;
    }
  case relocInfo::external_word_type:
  case relocInfo::internal_word_type:
  case relocInfo::section_word_type:
    {
      DataRelocation* r = (DataRelocation*) reloc();
      tty->print(" | [target=" INTPTR_FORMAT "]", r->value()); //value==target
      break;
    }
  case relocInfo::static_call_type:
  case relocInfo::runtime_call_type:
    {
      CallRelocation* r = (CallRelocation*) reloc();
      tty->print(" | [destination=" INTPTR_FORMAT "]", r->destination());
      break;
    }
  case relocInfo::virtual_call_type:
    {
      virtual_call_Relocation* r = (virtual_call_Relocation*) reloc();
      tty->print(" | [destination=" INTPTR_FORMAT " cached_value=" INTPTR_FORMAT "]",
                 r->destination(), r->cached_value());
      break;
    }
  case relocInfo::static_stub_type:
    {
      static_stub_Relocation* r = (static_stub_Relocation*) reloc();
      tty->print(" | [static_call=" INTPTR_FORMAT "]", r->static_call());
      break;
    }
  case relocInfo::trampoline_stub_type:
    {
      trampoline_stub_Relocation* r = (trampoline_stub_Relocation*) reloc();
      tty->print(" | [trampoline owner=" INTPTR_FORMAT "]", r->owner());
      break;
    }
  }
  tty->cr();
}


void RelocIterator::print() {
  RelocIterator save_this = (*this);
  relocInfo* scan = _current;
  if (!has_current())  scan += 1;  // nothing to scan here!

  bool skip_next = has_current();
  bool got_next;
  while (true) {
    got_next = (skip_next || next());
    skip_next = false;

    tty->print("         @" INTPTR_FORMAT ": ", scan);
    relocInfo* newscan = _current+1;
    if (!has_current())  newscan -= 1;  // nothing to scan here!
    while (scan < newscan) {
      tty->print("%04x", *(short*)scan & 0xFFFF);
      scan++;
    }
    tty->cr();

    if (!got_next)  break;
    print_current();
  }

  (*this) = save_this;
}

// For the debugger:
extern "C"
void print_blob_locs(nmethod* nm) {
  nm->print();
  RelocIterator iter(nm);
  iter.print();
}
extern "C"
void print_buf_locs(CodeBuffer* cb) {
  FlagSetting fs(PrintRelocations, true);
  cb->print();
}
#endif // !PRODUCT
C:\hotspot-69087d08d473\src\share\vm/code/relocInfo.hpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_RELOCINFO_HPP
#define SHARE_VM_CODE_RELOCINFO_HPP

#include "memory/allocation.hpp"
#include "utilities/top.hpp"

class NativeMovConstReg;

// Types in this file:
//    relocInfo
//      One element of an array of halfwords encoding compressed relocations.
//      Also, the source of relocation types (relocInfo::oop_type, ...).
//    Relocation
//      A flyweight object representing a single relocation.
//      It is fully unpacked from the compressed relocation array.
//    metadata_Relocation, ... (subclasses of Relocation)
//      The location of some type-specific operations (metadata_addr, ...).
//      Also, the source of relocation specs (metadata_Relocation::spec, ...).
//    oop_Relocation, ... (subclasses of Relocation)
//      oops in the code stream (strings, class loaders)
//      Also, the source of relocation specs (oop_Relocation::spec, ...).
//    RelocationHolder
//      A ValueObj type which acts as a union holding a Relocation object.
//      Represents a relocation spec passed into a CodeBuffer during assembly.
//    RelocIterator
//      A StackObj which iterates over the relocations associated with
//      a range of code addresses.  Can be used to operate a copy of code.
//    BoundRelocation
//      An _internal_ type shared by packers and unpackers of relocations.
//      It pastes together a RelocationHolder with some pointers into
//      code and relocInfo streams.


// Notes on relocType:
//
// These hold enough information to read or write a value embedded in
// the instructions of a CodeBlob.  They're used to update:
//
//   1) embedded oops     (isOop()          == true)
//   2) inline caches     (isIC()           == true)
//   3) runtime calls     (isRuntimeCall()  == true)
//   4) internal word ref (isInternalWord() == true)
//   5) external word ref (isExternalWord() == true)
//
// when objects move (GC) or if code moves (compacting the code heap).
// They are also used to patch the code (if a call site must change)
//
// A relocInfo is represented in 16 bits:
//   4 bits indicating the relocation type
//  12 bits indicating the offset from the previous relocInfo address
//
// The offsets accumulate along the relocInfo stream to encode the
// address within the CodeBlob, which is named RelocIterator::addr().
// The address of a particular relocInfo always points to the first
// byte of the relevant instruction (and not to any of its subfields
// or embedded immediate constants).
//
// The offset value is scaled appropriately for the target machine.
// (See relocInfo_<arch>.hpp for the offset scaling.)
//
// On some machines, there may also be a "format" field which may provide
// additional information about the format of the instruction stream
// at the corresponding code address.  The format value is usually zero.
// Any machine (such as Intel) whose instructions can sometimes contain
// more than one relocatable constant needs format codes to distinguish
// which operand goes with a given relocation.
//
// If the target machine needs N format bits, the offset has 12-N bits,
// the format is encoded between the offset and the type, and the
// relocInfo_<arch>.hpp file has manifest constants for the format codes.
//
// If the type is "data_prefix_tag" then the offset bits are further encoded,
// and in fact represent not a code-stream offset but some inline data.
// The data takes the form of a counted sequence of halfwords, which
// precedes the actual relocation record.  (Clients never see it directly.)
// The interpretation of this extra data depends on the relocation type.
//
// On machines that have 32-bit immediate fields, there is usually
// little need for relocation "prefix" data, because the instruction stream
// is a perfectly reasonable place to store the value.  On machines in
// which 32-bit values must be "split" across instructions, the relocation
// data is the "true" specification of the value, which is then applied
// to some field of the instruction (22 or 13 bits, on SPARC).
//
// Whenever the location of the CodeBlob changes, any PC-relative
// relocations, and any internal_word_type relocations, must be reapplied.
// After the GC runs, oop_type relocations must be reapplied.
//
//
// Here are meanings of the types:
//
// relocInfo::none -- a filler record
//   Value:  none
//   Instruction: The corresponding code address is ignored
//   Data:  Any data prefix and format code are ignored
//   (This means that any relocInfo can be disabled by setting
//   its type to none.  See relocInfo::remove.)
//
// relocInfo::oop_type, relocInfo::metadata_type -- a reference to an oop or meta data
//   Value:  an oop, or else the address (handle) of an oop
//   Instruction types: memory (load), set (load address)
//   Data:  []       an oop stored in 4 bytes of instruction
//          [n]      n is the index of an oop in the CodeBlob's oop pool
//          [[N]n l] and l is a byte offset to be applied to the oop
//          [Nn Ll]  both index and offset may be 32 bits if necessary
//   Here is a special hack, used only by the old compiler:
//          [[N]n 00] the value is the __address__ of the nth oop in the pool
//   (Note that the offset allows optimal references to class variables.)
//
// relocInfo::internal_word_type -- an address within the same CodeBlob
// relocInfo::section_word_type -- same, but can refer to another section
//   Value:  an address in the CodeBlob's code or constants section
//   Instruction types: memory (load), set (load address)
//   Data:  []     stored in 4 bytes of instruction
//          [[L]l] a relative offset (see [About Offsets] below)
//   In the case of section_word_type, the offset is relative to a section
//   base address, and the section number (e.g., SECT_INSTS) is encoded
//   into the low two bits of the offset L.
//
// relocInfo::external_word_type -- a fixed address in the runtime system
//   Value:  an address
//   Instruction types: memory (load), set (load address)
//   Data:  []   stored in 4 bytes of instruction
//          [n]  the index of a "well-known" stub (usual case on RISC)
//          [Ll] a 32-bit address
//
// relocInfo::runtime_call_type -- a fixed subroutine in the runtime system
//   Value:  an address
//   Instruction types: PC-relative call (or a PC-relative branch)
//   Data:  []   stored in 4 bytes of instruction
//
// relocInfo::static_call_type -- a static call
//   Value:  a CodeBlob, a stub, or a fixup routine
//   Instruction types: a call
//   Data:  []
//   The identity of the callee is extracted from debugging information.
//   //%note reloc_3
//
// relocInfo::virtual_call_type -- a virtual call site (which includes an inline
//                                 cache)
//   Value:  a CodeBlob, a stub, the interpreter, or a fixup routine
//   Instruction types: a call, plus some associated set-oop instructions
//   Data:  []       the associated set-oops are adjacent to the call
//          [n]      n is a relative offset to the first set-oop
//          [[N]n l] and l is a limit within which the set-oops occur
//          [Nn Ll]  both n and l may be 32 bits if necessary
//   The identity of the callee is extracted from debugging information.
//
// relocInfo::opt_virtual_call_type -- a virtual call site that is statically bound
//
//    Same info as a static_call_type. We use a special type so that the
//    handling of virtual and static calls is kept separate.
//
//
//   The offset n points to the first set-oop.  (See [About Offsets] below.)
//   In turn, the set-oop instruction specifies or contains an oop cell devoted
//   exclusively to the IC call, which can be patched along with the call.
//
//   The locations of any other set-oops are found by searching the relocation
//   information starting at the first set-oop, and continuing until all
//   relocations up through l have been inspected.  The value l is another
//   relative offset.  (Both n and l are relative to the call's first byte.)
//
//   The limit l of the search is exclusive.  However, if it points within
//   the call (e.g., offset zero), it is adjusted to point after the call and
//   any associated machine-specific delay slot.
//
//   Since the offsets could be as wide as 32-bits, these conventions
//   put no restrictions whatever upon code reorganization.
//
//   The compiler is responsible for ensuring that transition from a clean
//   state to a monomorphic compiled state is MP-safe.  This implies that
//   the system must respond well to intermediate states where a random
//   subset of the set-oops has been correctly updated from the clean state
//   upon entry to the VEP of the compiled method.  In the case of a
//   machine (Intel) with a single set-oop instruction, the 32-bit
//   immediate field must not straddle a unit of memory coherence.
//   //%note reloc_3
//
// relocInfo::static_stub_type -- an extra stub for each static_call_type
//   Value:  none
//   Instruction types: a virtual call:  { set_oop; jump; }
//   Data:  [[N]n]  the offset of the associated static_call reloc
//   This stub becomes the target of a static call which must be upgraded
//   to a virtual call (because the callee is interpreted).
//   See [About Offsets] below.
//   //%note reloc_2
//
// For example:
//
//   INSTRUCTIONS                        RELOC: TYPE    PREFIX DATA
//   ------------                               ----    -----------
// sethi      %hi(myObject),  R               oop_type [n(myObject)]
// ld      [R+%lo(myObject)+fldOffset], R2    oop_type [n(myObject) fldOffset]
// add R2, 1, R2
// st  R2, [R+%lo(myObject)+fldOffset]        oop_type [n(myObject) fldOffset]
//%note reloc_1
//
// This uses 4 instruction words, 8 relocation halfwords,
// and an entry (which is sharable) in the CodeBlob's oop pool,
// for a total of 36 bytes.
//
// Note that the compiler is responsible for ensuring the "fldOffset" when
// added to "%lo(myObject)" does not overflow the immediate fields of the
// memory instructions.
//
//
// [About Offsets] Relative offsets are supplied to this module as
// positive byte offsets, but they may be internally stored scaled
// and/or negated, depending on what is most compact for the target
// system.  Since the object pointed to by the offset typically
// precedes the relocation address, it is profitable to store
// these negative offsets as positive numbers, but this decision
// is internal to the relocation information abstractions.
//

class Relocation;
class CodeBuffer;
class CodeSection;
class RelocIterator;

class relocInfo VALUE_OBJ_CLASS_SPEC {
  friend class RelocIterator;
 public:
  enum relocType {
    none                    =  0, // Used when no relocation should be generated
    oop_type                =  1, // embedded oop
    virtual_call_type       =  2, // a standard inline cache call for a virtual send
    opt_virtual_call_type   =  3, // a virtual call that has been statically bound (i.e., no IC cache)
    static_call_type        =  4, // a static send
    static_stub_type        =  5, // stub-entry for static send  (takes care of interpreter case)
    runtime_call_type       =  6, // call to fixed external routine
    external_word_type      =  7, // reference to fixed external address
    internal_word_type      =  8, // reference within the current code blob
    section_word_type       =  9, // internal, but a cross-section reference
    poll_type               = 10, // polling instruction for safepoints
    poll_return_type        = 11, // polling instruction for safepoints at return
    metadata_type           = 12, // metadata that used to be oops
    trampoline_stub_type    = 13, // stub-entry for trampoline
    yet_unused_type_1       = 14, // Still unused
    data_prefix_tag         = 15, // tag for a prefix (carries data arguments)
    type_mask               = 15  // A mask which selects only the above values
  };

 protected:
  unsigned short _value;

  enum RawBitsToken { RAW_BITS };
  relocInfo(relocType type, RawBitsToken ignore, int bits)
    : _value((type << nontype_width) + bits) { }

  relocInfo(relocType type, RawBitsToken ignore, int off, int f)
    : _value((type << nontype_width) + (off / (unsigned)offset_unit) + (f << offset_width)) { }

 public:
  // constructor
  relocInfo(relocType type, int offset, int format = 0)
#ifndef ASSERT
  {
    (*this) = relocInfo(type, RAW_BITS, offset, format);
  }
#else
  // Put a bunch of assertions out-of-line.
  ;
#endif

  #define APPLY_TO_RELOCATIONS(visitor) \
    visitor(oop) \
    visitor(metadata) \
    visitor(virtual_call) \
    visitor(opt_virtual_call) \
    visitor(static_call) \
    visitor(static_stub) \
    visitor(runtime_call) \
    visitor(external_word) \
    visitor(internal_word) \
    visitor(poll) \
    visitor(poll_return) \
    visitor(section_word) \
    visitor(trampoline_stub) \


 public:
  enum {
    value_width             = sizeof(unsigned short) * BitsPerByte,
    type_width              = 4,   // == log2(type_mask+1)
    nontype_width           = value_width - type_width,
    datalen_width           = nontype_width-1,
    datalen_tag             = 1 << datalen_width,  // or-ed into _value
    datalen_limit           = 1 << datalen_width,
    datalen_mask            = (1 << datalen_width)-1
  };

  // accessors
 public:
  relocType  type()       const { return (relocType)((unsigned)_value >> nontype_width); }
  int  format()           const { return format_mask==0? 0: format_mask &
                                         ((unsigned)_value >> offset_width); }
  int  addr_offset()      const { assert(!is_prefix(), "must have offset");
                                  return (_value & offset_mask)*offset_unit; }

 protected:
  const short* data()     const { assert(is_datalen(), "must have data");
                                  return (const short*)(this + 1); }
  int          datalen()  const { assert(is_datalen(), "must have data");
                                  return (_value & datalen_mask); }
  int         immediate() const { assert(is_immediate(), "must have immed");
                                  return (_value & datalen_mask); }
 public:
  static int addr_unit()        { return offset_unit; }
  static int offset_limit()     { return (1 << offset_width) * offset_unit; }

  void set_type(relocType type);
  void set_format(int format);

  void remove() { set_type(none); }

 protected:
  bool is_none()                const { return type() == none; }
  bool is_prefix()              const { return type() == data_prefix_tag; }
  bool is_datalen()             const { assert(is_prefix(), "must be prefix");
                                        return (_value & datalen_tag) != 0; }
  bool is_immediate()           const { assert(is_prefix(), "must be prefix");
                                        return (_value & datalen_tag) == 0; }

 public:
  // Occasionally records of type relocInfo::none will appear in the stream.
  // We do not bother to filter these out, but clients should ignore them.
  // These records serve as "filler" in three ways:
  //  - to skip large spans of unrelocated code (this is rare)
  //  - to pad out the relocInfo array to the required oop alignment
  //  - to disable old relocation information which is no longer applicable

  inline friend relocInfo filler_relocInfo();

  // Every non-prefix relocation may be preceded by at most one prefix,
  // which supplies 1 or more halfwords of associated data.  Conventionally,
  // an int is represented by 0, 1, or 2 halfwords, depending on how
  // many bits are required to represent the value.  (In addition,
  // if the sole halfword is a 10-bit unsigned number, it is made
  // "immediate" in the prefix header word itself.  This optimization
  // is invisible outside this module.)

  inline friend relocInfo prefix_relocInfo(int datalen);

 protected:
  // an immediate relocInfo optimizes a prefix with one 10-bit unsigned value
  static relocInfo immediate_relocInfo(int data0) {
    assert(fits_into_immediate(data0), "data0 in limits");
    return relocInfo(relocInfo::data_prefix_tag, RAW_BITS, data0);
  }
  static bool fits_into_immediate(int data0) {
    return (data0 >= 0 && data0 < datalen_limit);
  }

 public:
  // Support routines for compilers.

  // This routine takes an infant relocInfo (unprefixed) and
  // edits in its prefix, if any.  It also updates dest.locs_end.
  void initialize(CodeSection* dest, Relocation* reloc);

  // This routine updates a prefix and returns the limit pointer.
  // It tries to compress the prefix from 32 to 16 bits, and if
  // successful returns a reduced "prefix_limit" pointer.
  relocInfo* finish_prefix(short* prefix_limit);

  // bit-packers for the data array:

  // As it happens, the bytes within the shorts are ordered natively,
  // but the shorts within the word are ordered big-endian.
  // This is an arbitrary choice, made this way mainly to ease debugging.
  static int data0_from_int(jint x)         { return x >> value_width; }
  static int data1_from_int(jint x)         { return (short)x; }
  static jint jint_from_data(short* data) {
    return (data[0] << value_width) + (unsigned short)data[1];
  }

  static jint short_data_at(int n, short* data, int datalen) {
    return datalen > n ? data[n] : 0;
  }

  static jint jint_data_at(int n, short* data, int datalen) {
    return datalen > n+1 ? jint_from_data(&data[n]) : short_data_at(n, data, datalen);
  }
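
  // Worked example (illustrative): for x = 0x12345678 and value_width = 16,
  // data0_from_int(x) == 0x1234 and data1_from_int(x) == 0x5678, and
  // jint_from_data on {0x1234, 0x5678} reconstructs 0x12345678, matching the
  // big-endian ordering of shorts within the word described above.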

  // Update methods for relocation information
  // (since code is dynamically patched, we also need to dynamically update the relocation info)
  // Both methods take old_type, so they are able to perform sanity checks on the information removed.
  static void change_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type, relocType new_type);
  static void remove_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type);

  // Machine dependent stuff
#ifdef TARGET_ARCH_x86
# include "relocInfo_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "relocInfo_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "relocInfo_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "relocInfo_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "relocInfo_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "relocInfo_ppc.hpp"
#endif


 protected:
  // Derived constant, based on format_width which is PD:
  enum {
    offset_width       = nontype_width - format_width,
    offset_mask        = (1<<offset_width) - 1,
    format_mask        = (1<<format_width) - 1
  };
 public:
  enum {
    // Conservatively large estimate of maximum length (in shorts)
    // of any relocation record.
    // Extended format is length prefix, data words, and tag/offset suffix.
    length_limit       = 1 + 1 + (3*BytesPerWord/BytesPerShort) + 1,
    have_format        = format_width > 0
  };
};

#define FORWARD_DECLARE_EACH_CLASS(name)              \
class name##_Relocation;
APPLY_TO_RELOCATIONS(FORWARD_DECLARE_EACH_CLASS)
#undef FORWARD_DECLARE_EACH_CLASS



inline relocInfo filler_relocInfo() {
  return relocInfo(relocInfo::none, relocInfo::offset_limit() - relocInfo::offset_unit);
}

inline relocInfo prefix_relocInfo(int datalen = 0) {
  assert(relocInfo::fits_into_immediate(datalen), "datalen in limits");
  return relocInfo(relocInfo::data_prefix_tag, relocInfo::RAW_BITS, relocInfo::datalen_tag | datalen);
}


// Holder for flyweight relocation objects.
// Although the flyweight subclasses are of varying sizes,
// the holder is "one size fits all".
class RelocationHolder VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;
  friend class CodeSection;

 private:
  // this preallocated memory must accommodate all subclasses of Relocation
  // (this number is assertion-checked in Relocation::operator new)
  enum { _relocbuf_size = 5 };
  void* _relocbuf[ _relocbuf_size ];

 public:
  Relocation* reloc() const { return (Relocation*) &_relocbuf[0]; }
  inline relocInfo::relocType type() const;

  // Add a constant offset to a relocation.  Helper for class Address.
  RelocationHolder plus(int offset) const;

  inline RelocationHolder();                // initializes type to none

  inline RelocationHolder(Relocation* r);   // make a copy

  static const RelocationHolder none;
};

// A RelocIterator iterates through the relocation information of a CodeBlob.
// It is a variable BoundRelocation which is able to take on successive
// values as it is advanced through a code stream.
// Usage:
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     iter.reloc()->some_operation();
//   }
// or:
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     switch (iter.type()) {
//      case relocInfo::oop_type          :
//      case relocInfo::virtual_call_type :
//      case relocInfo::static_call_type  :
//      case relocInfo::runtime_call_type :
//      case relocInfo::internal_word_type:
//      case relocInfo::external_word_type:
//      ...
//     }
//   }

class RelocIterator : public StackObj {
  enum { SECT_LIMIT = 3 };  // must be equal to CodeBuffer::SECT_LIMIT, checked in ctor
  friend class Relocation;
  friend class relocInfo;       // for change_reloc_info_for_address only
  typedef relocInfo::relocType relocType;

 private:
  address    _limit;   // stop producing relocations after this _addr
  relocInfo* _current; // the current relocation information
  relocInfo* _end;     // end marker; we're done iterating when _current == _end
  nmethod*   _code;    // compiled method containing _addr
  address    _addr;    // instruction to which the relocation applies
  short      _databuf; // spare buffer for compressed data
  short*     _data;    // pointer to the relocation's data
  short      _datalen; // number of halfwords in _data
  char       _format;  // position within the instruction

  // Base addresses needed to compute targets of section_word_type relocs.
  address    _section_start[SECT_LIMIT];
  address    _section_end  [SECT_LIMIT];

  void set_has_current(bool b) {
    _datalen = !b ? -1 : 0;
    debug_only(_data = NULL);
  }
  void set_current(relocInfo& ri) {
    _current = &ri;
    set_has_current(true);
  }

  RelocationHolder _rh; // where the current relocation is allocated

  relocInfo* current() const { assert(has_current(), "must have current");
                               return _current; }

  void set_limits(address begin, address limit);

  void advance_over_prefix();    // helper method

  void initialize_misc();

  void initialize(nmethod* nm, address begin, address limit);

  RelocIterator() { initialize_misc(); }

 public:
  // constructor
  RelocIterator(nmethod* nm,     address begin = NULL, address limit = NULL);
  RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);

  // get next reloc info, return !eos
  bool next() {
    _current++;
    assert(_current <= _end, "must not overrun relocInfo");
    if (_current == _end) {
      set_has_current(false);
      return false;
    }
    set_has_current(true);

    if (_current->is_prefix()) {
      advance_over_prefix();
      assert(!current()->is_prefix(), "only one prefix at a time");
    }

    _addr += _current->addr_offset();

    if (_limit != NULL && _addr >= _limit) {
      set_has_current(false);
      return false;
    }

    if (relocInfo::have_format)  _format = current()->format();
    return true;
  }

  // accessors
  address      limit()        const { return _limit; }
  void     set_limit(address x);
  relocType    type()         const { return current()->type(); }
  int          format()       const { return (relocInfo::have_format) ? current()->format() : 0; }
  address      addr()         const { return _addr; }
  nmethod*     code()         const { return _code; }
  short*       data()         const { return _data; }
  int          datalen()      const { return _datalen; }
  bool     has_current()      const { return _datalen >= 0; }

  void       set_addr(address addr) { _addr = addr; }
  bool   addr_in_const()      const;

  address section_start(int n) const {
    assert(_section_start[n], "must be initialized");
    return _section_start[n];
  }
  address section_end(int n) const {
    assert(_section_end[n], "must be initialized");
    return _section_end[n];
  }

  // The address points to the affected displacement part of the instruction.
  // For RISC, this is just the whole instruction.
  // For Intel, this is an unaligned 32-bit word.

  // type-specific relocation accessors:  oop_Relocation* oop_reloc(), etc.
  #define EACH_TYPE(name)                               \
  inline name##_Relocation* name##_reloc();
  APPLY_TO_RELOCATIONS(EACH_TYPE)
  #undef EACH_TYPE
  // generic relocation accessor; switches on type to call the above
  Relocation* reloc();

  // CodeBlob's have relocation indexes for faster random access:
  static int locs_and_index_size(int code_size, int locs_size);
  // Store an index into [dest_begin+dest_count..dest_end).
  // dest_begin[0..dest_count) holds the actual relocation information.
  // Everything else up to dest_end is free space for the index.
  static void create_index(relocInfo* dest_begin, int dest_count, relocInfo* dest_end);

#ifndef PRODUCT
 public:
  void print();
  void print_current();
#endif
};
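
// A typical traversal (illustrative sketch, not part of this file; assumes
// `nm` is a live nmethod):
//
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     switch (iter.type()) {
//     case relocInfo::oop_type: {
//       oop_Relocation* r = iter.oop_reloc();
//       if (!r->oop_is_immediate())  r->fix_oop_relocation();
//       break;
//     }
//     default:
//       break;
//     }
//   }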


// A Relocation is a flyweight object allocated within a RelocationHolder.
// It represents the relocation data of a relocation record.
// So, the RelocIterator unpacks relocInfos into Relocations.

class Relocation VALUE_OBJ_CLASS_SPEC {
  friend class RelocationHolder;
  friend class RelocIterator;

 private:
  static void guarantee_size();

  // When a relocation has been created by a RelocIterator,
  // this field is non-null.  It allows the relocation to know
  // its context, such as the address to which it applies.
  RelocIterator* _binding;

 protected:
  RelocIterator* binding() const {
    assert(_binding != NULL, "must be bound");
    return _binding;
  }
  void set_binding(RelocIterator* b) {
    assert(_binding == NULL, "must be unbound");
    _binding = b;
    assert(_binding != NULL, "must now be bound");
  }

  Relocation() {
    _binding = NULL;
  }

  static RelocationHolder newHolder() {
    return RelocationHolder();
  }

 public:
  void* operator new(size_t size, const RelocationHolder& holder) throw() {
    if (size > sizeof(holder._relocbuf)) guarantee_size();
    assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
    return holder.reloc();
  }

  // make a generic relocation for a given type (if possible)
  static RelocationHolder spec_simple(relocInfo::relocType rtype);

  // here is the type-specific hook which writes relocation data:
  virtual void pack_data_to(CodeSection* dest) { }

  // here is the type-specific hook which reads (unpacks) relocation data:
  virtual void unpack_data() {
    assert(datalen()==0 || type()==relocInfo::none, "no data here");
  }

  static bool is_reloc_index(intptr_t index) {
    return 0 < index && index < os::vm_page_size();
  }

 protected:
  // Helper functions for pack_data_to() and unpack_data().

  // Most of the compression logic is confined here.
  // (The "immediate data" mechanism of relocInfo works independently
  // of this stuff, and acts to further compress most 1-word data prefixes.)

  // A variable-width int is encoded as a short if it will fit in 16 bits.
  // The decoder looks at datalen to decide whether to unpack short or jint.
  // Most relocation records are quite simple, containing at most two ints.

  static bool is_short(jint x) { return x == (short)x; }
  static short* add_short(short* p, int x)  { *p++ = x; return p; }
  static short* add_jint (short* p, jint x) {
    *p++ = relocInfo::data0_from_int(x); *p++ = relocInfo::data1_from_int(x);
    return p;
  }
  static short* add_var_int(short* p, jint x) {   // add a variable-width int
    if (is_short(x))  p = add_short(p, x);
    else              p = add_jint (p, x);
    return p;
  }

  static short* pack_1_int_to(short* p, jint x0) {
    // Format is one of:  [] [x] [Xx]
    if (x0 != 0)  p = add_var_int(p, x0);
    return p;
  }
  int unpack_1_int() {
    assert(datalen() <= 2, "too much data");
    return relocInfo::jint_data_at(0, data(), datalen());
  }

  // With two ints, the short form is used only if both ints are short.
  short* pack_2_ints_to(short* p, jint x0, jint x1) {
    // Format is one of:  [] [x y?] [Xx Y?y]
    if (x0 == 0 && x1 == 0) {
      // no halfwords needed to store zeroes
    } else if (is_short(x0) && is_short(x1)) {
      // 1-2 halfwords needed to store shorts
      p = add_short(p, x0); if (x1!=0) p = add_short(p, x1);
    } else {
      // 3-4 halfwords needed to store jints
      p = add_jint(p, x0);             p = add_var_int(p, x1);
    }
    return p;
  }
  void unpack_2_ints(jint& x0, jint& x1) {
    int    dlen = datalen();
    short* dp  = data();
    if (dlen <= 2) {
      x0 = relocInfo::short_data_at(0, dp, dlen);
      x1 = relocInfo::short_data_at(1, dp, dlen);
    } else {
      assert(dlen <= 4, "too much data");
      x0 = relocInfo::jint_data_at(0, dp, dlen);
      x1 = relocInfo::jint_data_at(2, dp, dlen);
    }
  }
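
  // Worked example (illustrative): pack_2_ints_to(p, 7, 3) stores the short
  // form [7 3] in 2 halfwords; pack_2_ints_to(p, 7, 0) stores just [7] in 1.
  // pack_2_ints_to(p, 7, 100000) must use the jint form, storing 7 as a full
  // jint (2 halfwords) followed by 100000 as a variable-width int (2 more).
  // pack_2_ints_to(p, 0, 0) stores nothing; the decoder reads zeroes back.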

 protected:
  // platform-dependent utilities for decoding and patching instructions
  void       pd_set_data_value       (address x, intptr_t off, bool verify_only = false); // a set or mem-ref
  void       pd_verify_data_value    (address x, intptr_t off) { pd_set_data_value(x, off, true); }
  address    pd_call_destination     (address orig_addr = NULL);
  void       pd_set_call_destination (address x);

  // this extracts the address of an address in the code stream instead of the reloc data
  address* pd_address_in_code       ();

  // this extracts an address from the code stream instead of the reloc data
  address  pd_get_address_from_code ();

  // these convert from byte offsets, to scaled offsets, to addresses
  static jint scaled_offset(address x, address base) {
    int byte_offset = x - base;
    int offset = -byte_offset / relocInfo::addr_unit();
    assert(address_from_scaled_offset(offset, base) == x, "just checkin'");
    return offset;
  }
  static jint scaled_offset_null_special(address x, address base) {
    // Some relocations treat offset=0 as meaning NULL.
    // Handle this extra convention carefully.
    if (x == NULL)  return 0;
    assert(x != base, "offset must not be zero");
    return scaled_offset(x, base);
  }
  static address address_from_scaled_offset(jint offset, address base) {
    int byte_offset = -( offset * relocInfo::addr_unit() );
    return base + byte_offset;
  }

  // these convert between indexes and addresses in the runtime system
  static int32_t runtime_address_to_index(address runtime_address);
  static address index_to_runtime_address(int32_t index);

  // helpers for mapping between old and new addresses after a move or resize
  address old_addr_for(address newa, const CodeBuffer* src, CodeBuffer* dest);
  address new_addr_for(address olda, const CodeBuffer* src, CodeBuffer* dest);
  void normalize_address(address& addr, const CodeSection* dest, bool allow_other_sections = false);

 public:
  // accessors which only make sense for a bound Relocation
  address  addr()         const { return binding()->addr(); }
  nmethod* code()         const { return binding()->code(); }
  bool     addr_in_const() const { return binding()->addr_in_const(); }
 protected:
  short*   data()         const { return binding()->data(); }
  int      datalen()      const { return binding()->datalen(); }
  int      format()       const { return binding()->format(); }

 public:
  virtual relocInfo::relocType type()            { return relocInfo::none; }

  // is it a call instruction?
  virtual bool is_call()                         { return false; }

  // is it a data movement instruction?
  virtual bool is_data()                         { return false; }

  // some relocations can compute their own values
  virtual address  value();

  // all relocations are able to reassert their values
  virtual void set_value(address x);

  virtual void clear_inline_cache()              { }

  // This method assumes that all virtual/static (inline) caches are cleared (since the position of
  // static_call_type and ic_call_type relocations is not always fixed, depending on the state of the
  // cache). However, this is probably a reasonable assumption, since empty caches simplify code relocation.
  virtual void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { }

  void print();
};


// certain inlines must be deferred until class Relocation is defined:

inline RelocationHolder::RelocationHolder() {
  // initialize the vtbl, just to keep things type-safe
  new(*this) Relocation();
}


inline RelocationHolder::RelocationHolder(Relocation* r) {
  // wordwise copy from r (ok if it copies garbage after r)
  for (int i = 0; i < _relocbuf_size; i++) {
    _relocbuf[i] = ((void**)r)[i];
  }
}


relocInfo::relocType RelocationHolder::type() const {
  return reloc()->type();
}

// A DataRelocation always points at a memory or load-constant instruction.
// It is absolute on most machines, and the constant is split on RISCs.
// The specific subtypes are oop, external_word, and internal_word.
// By convention, the "value" does not include a separately reckoned "offset".
class DataRelocation : public Relocation {
 public:
  bool          is_data()                      { return true; }

  // both target and offset must be computed somehow from relocation data
  virtual int    offset()                      { return 0; }
  address         value()                      = 0;
  void        set_value(address x)             { set_value(x, offset()); }
  void        set_value(address x, intptr_t o) {
    if (addr_in_const())
      *(address*)addr() = x;
    else
      pd_set_data_value(x, o);
  }
  void        verify_value(address x) {
    if (addr_in_const())
      assert(*(address*)addr() == x, "must agree");
    else
      pd_verify_data_value(x, offset());
  }

  // The "o" (displacement) argument is relevant only to split relocations
  // on RISC machines.  In some CPUs (SPARC), the set-hi and set-lo ins'ns
  // can encode more than 32 bits between them.  This allows compilers to
  // share set-hi instructions between addresses that differ by a small
  // offset (e.g., different static variables in the same class).
  // On such machines, the "x" argument to set_value on all set-lo
  // instructions must be the same as the "x" argument for the
  // corresponding set-hi instructions.  The "o" arguments for the
  // set-hi instructions are ignored, and must not affect the high-half
  // immediate constant.  The "o" arguments for the set-lo instructions are
  // added into the low-half immediate constant, and must not overflow it.
};
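
// For instance (illustrative): if two statics live at base+0x10 and base+0x18,
// a compiler may emit one shared set-hi of `base` and two set-lo instructions;
// both relocations then carry x = base, with o = 0x10 and o = 0x18
// respectively, and only the set-lo immediates absorb the offsets.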

// A CallRelocation always points at a call instruction.
// It is PC-relative on most machines.
class CallRelocation : public Relocation {
 public:
  bool is_call() { return true; }

  address  destination()                    { return pd_call_destination(); }
  void     set_destination(address x); // pd_set_call_destination

  void     fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
  address  value()                          { return destination();  }
  void     set_value(address x)             { set_destination(x); }
};

class oop_Relocation : public DataRelocation {
  relocInfo::relocType type() { return relocInfo::oop_type; }

 public:
  // encode in one of these formats:  [] [n] [n l] [Nn l] [Nn Ll]
  // an oop in the CodeBlob's oop pool
  static RelocationHolder spec(int oop_index, int offset = 0) {
    assert(oop_index > 0, "must be a pool-resident oop");
    RelocationHolder rh = newHolder();
    new(rh) oop_Relocation(oop_index, offset);
    return rh;
  }
  // an oop in the instruction stream
  static RelocationHolder spec_for_immediate() {
    const int oop_index = 0;
    const int offset    = 0;    // if you want an offset, use the oop pool
    RelocationHolder rh = newHolder();
    new(rh) oop_Relocation(oop_index, offset);
    return rh;
  }

 private:
  jint _oop_index;                  // if > 0, index into CodeBlob::oop_at
  jint _offset;                     // byte offset to apply to the oop itself

  oop_Relocation(int oop_index, int offset) {
    _oop_index = oop_index; _offset = offset;
  }

  friend class RelocIterator;
  oop_Relocation() { }

 public:
  int oop_index() { return _oop_index; }
  int offset()    { return _offset; }

  // data is packed in "2_ints" format:  [i o] or [Ii Oo]
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void fix_oop_relocation();        // reasserts oop value

  void verify_oop_relocation();

  address value()  { return (address) *oop_addr(); }

  bool oop_is_immediate()  { return oop_index() == 0; }

  oop* oop_addr();                  // addr or &pool[jint_data]
  oop  oop_value();                 // *oop_addr
  // Note:  oop_value transparently converts Universe::non_oop_word to NULL.
};
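
// Typical emission (illustrative sketch; assumes a MacroAssembler* `masm`
// and a pool-resident oop at `oop_index`):
//
//   RelocationHolder rspec = oop_Relocation::spec(oop_index);
//   masm->relocate(rspec);   // attach the reloc to the next emitted instruction
//   // ... emit the instruction that materializes the oop ...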


// For now this is a copy of oop_Relocation; parts of both/either may be deleted later.
class metadata_Relocation : public DataRelocation {
  relocInfo::relocType type() { return relocInfo::metadata_type; }

 public:
  // encode in one of these formats:  [] [n] [n l] [Nn l] [Nn Ll]
  // a metadata entry in the CodeBlob's metadata pool
  static RelocationHolder spec(int metadata_index, int offset = 0) {
    assert(metadata_index > 0, "must be a pool-resident metadata");
    RelocationHolder rh = newHolder();
    new(rh) metadata_Relocation(metadata_index, offset);
    return rh;
  }
  // a metadata entry in the instruction stream
  static RelocationHolder spec_for_immediate() {
    const int metadata_index = 0;
    const int offset    = 0;    // if you want an offset, use the metadata pool
    RelocationHolder rh = newHolder();
    new(rh) metadata_Relocation(metadata_index, offset);
    return rh;
  }

 private:
  jint _metadata_index;            // if > 0, index into nmethod::metadata_at
  jint _offset;                     // byte offset to apply to the metadata itself

  metadata_Relocation(int metadata_index, int offset) {
    _metadata_index = metadata_index; _offset = offset;
  }

  friend class RelocIterator;
  metadata_Relocation() { }

  // Fixes a Metadata pointer in the code. Most platforms embed the
  // Metadata pointer in the code at compile time, so this is empty
  // for them.
  void pd_fix_value(address x);

 public:
  int metadata_index() { return _metadata_index; }
  int offset()    { return _offset; }

  // data is packed in "2_ints" format:  [i o] or [Ii Oo]
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void fix_metadata_relocation();        // reasserts metadata value

  void verify_metadata_relocation();

  address value()  { return (address) *metadata_addr(); }

  bool metadata_is_immediate()  { return metadata_index() == 0; }

  Metadata**   metadata_addr();                  // addr or &pool[jint_data]
  Metadata*    metadata_value();                 // *metadata_addr
  // Note:  metadata_value transparently converts Universe::non_metadata_word to NULL.
};


class virtual_call_Relocation : public CallRelocation {
  relocInfo::relocType type() { return relocInfo::virtual_call_type; }

 public:
  // "cached_value" points to the first associated set-oop.
  // The oop_limit helps find the last associated set-oop.
  // (See comments at the top of this file.)
  static RelocationHolder spec(address cached_value) {
    RelocationHolder rh = newHolder();
    new(rh) virtual_call_Relocation(cached_value);
    return rh;
  }

  virtual_call_Relocation(address cached_value) {
    _cached_value = cached_value;
    assert(cached_value != NULL, "first oop address must be specified");
  }

 private:
  address _cached_value;               // location of set-value instruction

  friend class RelocIterator;
  virtual_call_Relocation() { }


 public:
  address cached_value();

  // data is packed as scaled offsets in "2_ints" format:  [f l] or [Ff Ll]
  // oop_limit is set to 0 if the limit falls somewhere within the call.
  // When unpacking, a zero oop_limit is taken to refer to the end of the call.
  // (This has the effect of bringing in the call's delay slot on SPARC.)
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void clear_inline_cache();
};


class opt_virtual_call_Relocation : public CallRelocation {
  relocInfo::relocType type() { return relocInfo::opt_virtual_call_type; }

 public:
  static RelocationHolder spec() {
    RelocationHolder rh = newHolder();
    new(rh) opt_virtual_call_Relocation();
    return rh;
  }

 private:
  friend class RelocIterator;
  opt_virtual_call_Relocation() { }

 public:
  void clear_inline_cache();

  // find the matching static_stub
  address static_stub();
};


class static_call_Relocation : public CallRelocation {
  relocInfo::relocType type() { return relocInfo::static_call_type; }

 public:
  static RelocationHolder spec() {
    RelocationHolder rh = newHolder();
    new(rh) static_call_Relocation();
    return rh;
  }

 private:
  friend class RelocIterator;
  static_call_Relocation() { }

 public:
  void clear_inline_cache();

  // find the matching static_stub
  address static_stub();
};

class static_stub_Relocation : public Relocation {
  relocInfo::relocType type() { return relocInfo::static_stub_type; }

 public:
  static RelocationHolder spec(address static_call) {
    RelocationHolder rh = newHolder();
    new(rh) static_stub_Relocation(static_call);
    return rh;
  }

 private:
  address _static_call;             // location of corresponding static_call

  static_stub_Relocation(address static_call) {
    _static_call = static_call;
  }

  friend class RelocIterator;
  static_stub_Relocation() { }

 public:
  void clear_inline_cache();

  address static_call() { return _static_call; }

  // data is packed as a scaled offset in "1_int" format:  [c] or [Cc]
  void pack_data_to(CodeSection* dest);
  void unpack_data();
};

class runtime_call_Relocation : public CallRelocation {
  relocInfo::relocType type() { return relocInfo::runtime_call_type; }

 public:
  static RelocationHolder spec() {
    RelocationHolder rh = newHolder();
    new(rh) runtime_call_Relocation();
    return rh;
  }

 private:
  friend class RelocIterator;
  runtime_call_Relocation() { }

 public:
};

// Trampoline Relocations.
// A trampoline allows encoding a small branch in the code, even if there
// is a chance that this branch cannot reach all possible code locations.
// If the relocation finds that a branch is too far for the instruction
// in the code, it can patch it to jump to the trampoline, where there is
// sufficient space for a far branch. Needed on PPC.
class trampoline_stub_Relocation : public Relocation {
  relocInfo::relocType type() { return relocInfo::trampoline_stub_type; }

 public:
  static RelocationHolder spec(address static_call) {
    RelocationHolder rh = newHolder();
    return (new (rh) trampoline_stub_Relocation(static_call));
  }

 private:
  address _owner;    // Address of the NativeCall that owns the trampoline.

  trampoline_stub_Relocation(address owner) {
    _owner = owner;
  }

  friend class RelocIterator;
  trampoline_stub_Relocation() { }

 public:

  // Return the address of the NativeCall that owns the trampoline.
  address owner() { return _owner; }

  void pack_data_to(CodeSection * dest);
  void unpack_data();

  // Find the trampoline stub for a call.
  static address get_trampoline_for(address call, nmethod* code);
};
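
// Illustrative lookup (assumes `call_site` is the address of a NativeCall
// inside nmethod `nm`):
//
//   address tramp = trampoline_stub_Relocation::get_trampoline_for(call_site, nm);
//   if (tramp != NULL) {
//     // the far branch can be routed through the trampoline at `tramp`
//   }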

class external_word_Relocation : public DataRelocation {
  relocInfo::relocType type() { return relocInfo::external_word_type; }

 public:
  static RelocationHolder spec(address target) {
    assert(target != NULL, "must not be null");
    RelocationHolder rh = newHolder();
    new(rh) external_word_Relocation(target);
    return rh;
  }

  // Use this one where all 32/64 bits of the target live in the code stream.
  // The target must be an intptr_t, and must be absolute (not relative).
  static RelocationHolder spec_for_immediate() {
    RelocationHolder rh = newHolder();
    new(rh) external_word_Relocation(NULL);
    return rh;
  }

  // Some address looking values aren't safe to treat as relocations
  // and should just be treated as constants.
  static bool can_be_relocated(address target) {
    return target != NULL && !is_reloc_index((intptr_t)target);
  }

 private:
  address _target;                  // address in runtime

  external_word_Relocation(address target) {
    _target = target;
  }

  friend class RelocIterator;
  external_word_Relocation() { }

 public:
  // data is packed as a well-known address in "1_int" format:  [a] or [Aa]
  // The function runtime_address_to_index is used to turn full addresses
  // to short indexes, if they are pre-registered by the stub mechanism.
  // If the "a" value is 0 (i.e., _target is NULL), the address is stored
  // in the code stream.  See external_word_Relocation::target().
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
  address  target();        // if _target==NULL, fetch addr from code stream
  address  value()          { return target(); }
};

class internal_word_Relocation : public DataRelocation {
  relocInfo::relocType type() { return relocInfo::internal_word_type; }

 public:
  static RelocationHolder spec(address target) {
    assert(target != NULL, "must not be null");
    RelocationHolder rh = newHolder();
    new(rh) internal_word_Relocation(target);
    return rh;
  }

  // use this one where all the bits of the target can fit in the code stream:
  static RelocationHolder spec_for_immediate() {
    RelocationHolder rh = newHolder();
    new(rh) internal_word_Relocation(NULL);
    return rh;
  }

  internal_word_Relocation(address target) {
    _target  = target;
    _section = -1;  // self-relative
  }

 protected:
  address _target;                  // address in CodeBlob
  int     _section;                 // section providing base address, if any

  friend class RelocIterator;
  internal_word_Relocation() { }

  // bit-width of LSB field in packed offset, if section >= 0
  enum { section_width = 2 }; // must equal CodeBuffer::sect_bits

 public:
  // data is packed as a scaled offset in "1_int" format:  [o] or [Oo]
  // If the "o" value is 0 (i.e., _target is NULL), the offset is stored
  // in the code stream.  See internal_word_Relocation::target().
  // If _section is not -1, it is appended to the low bits of the offset.
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
  address  target();        // if _target==NULL, fetch addr from code stream
  int      section()        { return _section;   }
  address  value()          { return target();   }
};

class section_word_Relocation : public internal_word_Relocation {
  relocInfo::relocType type() { return relocInfo::section_word_type; }

 public:
  static RelocationHolder spec(address target, int section) {
    RelocationHolder rh = newHolder();
    new(rh) section_word_Relocation(target, section);
    return rh;
  }

  section_word_Relocation(address target, int section) {
    assert(target != NULL, "must not be null");
    assert(section >= 0, "must be a valid section");
    _target  = target;
    _section = section;
  }

  //void pack_data_to -- inherited
  void unpack_data();

 private:
  friend class RelocIterator;
  section_word_Relocation() { }
};


class poll_Relocation : public Relocation {
  bool          is_data()                      { return true; }
  relocInfo::relocType type() { return relocInfo::poll_type; }
  void     fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
};

class poll_return_Relocation : public Relocation {
  bool          is_data()                      { return true; }
  relocInfo::relocType type() { return relocInfo::poll_return_type; }
  void     fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
};

// We know all the xxx_Relocation classes, so now we can define these:
#define EACH_CASE(name)                                         \
inline name##_Relocation* RelocIterator::name##_reloc() {       \
  assert(type() == relocInfo::name##_type, "type must agree");  \
  /* The purpose of the placed "new" is to re-use the same */   \
  /* stack storage for each new iteration. */                   \
  name##_Relocation* r = new(_rh) name##_Relocation();          \
  r->set_binding(this);                                         \
  r->name##_Relocation::unpack_data();                          \
  return r;                                                     \
}
APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE

inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
  initialize(nm, begin, limit);
}

#endif // SHARE_VM_CODE_RELOCINFO_HPP
C:\hotspot-69087d08d473\src\share\vm/code/scopeDesc.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/debugInfoRec.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop) {
  _code          = code;
  _decode_offset = decode_offset;
  _objects       = decode_object_values(obj_decode_offset);
  _reexecute     = reexecute;
  _return_oop    = return_oop;
  decode_body();
}

ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop) {
  _code          = code;
  _decode_offset = decode_offset;
  _objects       = decode_object_values(DebugInformationRecorder::serialized_null);
  _reexecute     = reexecute;
  _return_oop    = return_oop;
  decode_body();
}


ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
  _code          = parent->_code;
  _decode_offset = parent->_sender_decode_offset;
  _objects       = parent->_objects;
  _reexecute     = false; //reexecute only applies to the first scope
  _return_oop    = false;
  decode_body();
}


void ScopeDesc::decode_body() {
  if (decode_offset() == DebugInformationRecorder::serialized_null) {
    // This is a sentinel record, which is only relevant to
    // approximate queries.  Decode a reasonable frame.
    _sender_decode_offset = DebugInformationRecorder::serialized_null;
    _method = _code->method();
    _bci = InvocationEntryBci;
    _locals_decode_offset = DebugInformationRecorder::serialized_null;
    _expressions_decode_offset = DebugInformationRecorder::serialized_null;
    _monitors_decode_offset = DebugInformationRecorder::serialized_null;
  } else {
    // decode header
    DebugInfoReadStream* stream  = stream_at(decode_offset());

    _sender_decode_offset = stream->read_int();
    _method = stream->read_method();
    _bci    = stream->read_bci();

    // decode offsets for body and sender
    _locals_decode_offset      = stream->read_int();
    _expressions_decode_offset = stream->read_int();
    _monitors_decode_offset    = stream->read_int();
  }
}


GrowableArray<ScopeValue*>* ScopeDesc::decode_scope_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
  DebugInfoReadStream* stream = stream_at(decode_offset);
  int length = stream->read_int();
  GrowableArray<ScopeValue*>* result = new GrowableArray<ScopeValue*> (length);
  for (int index = 0; index < length; index++) {
    result->push(ScopeValue::read_from(stream));
  }
  return result;
}

GrowableArray<ScopeValue*>* ScopeDesc::decode_object_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
  GrowableArray<ScopeValue*>* result = new GrowableArray<ScopeValue*>();
  DebugInfoReadStream* stream = new DebugInfoReadStream(_code, decode_offset, result);
  int length = stream->read_int();
  for (int index = 0; index < length; index++) {
    // Object values are pushed onto the 'result' array during the read so
    // that an object's fields can reference it (OBJECT_ID_CODE).
    (void)ScopeValue::read_from(stream);
  }
  assert(result->length() == length, "inconsistent debug information");
  return result;
}


GrowableArray<MonitorValue*>* ScopeDesc::decode_monitor_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
  DebugInfoReadStream* stream  = stream_at(decode_offset);
  int length = stream->read_int();
  GrowableArray<MonitorValue*>* result = new GrowableArray<MonitorValue*> (length);
  for (int index = 0; index < length; index++) {
    result->push(new MonitorValue(stream));
  }
  return result;
}

DebugInfoReadStream* ScopeDesc::stream_at(int decode_offset) const {
  return new DebugInfoReadStream(_code, decode_offset, _objects);
}

GrowableArray<ScopeValue*>* ScopeDesc::locals() {
  return decode_scope_values(_locals_decode_offset);
}

GrowableArray<ScopeValue*>* ScopeDesc::expressions() {
  return decode_scope_values(_expressions_decode_offset);
}

GrowableArray<MonitorValue*>* ScopeDesc::monitors() {
  return decode_monitor_values(_monitors_decode_offset);
}

GrowableArray<ScopeValue*>* ScopeDesc::objects() {
  return _objects;
}

bool ScopeDesc::is_top() const {
 return _sender_decode_offset == DebugInformationRecorder::serialized_null;
}

ScopeDesc* ScopeDesc::sender() const {
  if (is_top()) return NULL;
  return new ScopeDesc(this);
}
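
// Walking an inlining chain (illustrative sketch; assumes `nm` is an nmethod
// and `pc` lies within it):
//
//   ScopeDesc* sd = nm->scope_desc_at(pc);
//   while (sd != NULL) {
//     // sd->method() / sd->bci() describe one activation, innermost first
//     sd = sd->sender();   // NULL once the outermost scope is reached
//   }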


#ifndef PRODUCT

void ScopeDesc::print_value_on(outputStream* st) const {
  st->print("   ");
  method()->print_short_name(st);
  int lineno = method()->line_number_from_bci(bci());
  if (lineno != -1) {
    st->print_cr("@%d (line %d)", bci(), lineno);
  } else {
    st->print_cr("@%d", bci());
  }
}

void ScopeDesc::print_on(outputStream* st) const {
  print_on(st, NULL);
}

void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
  // header
  if (pd != NULL) {
    tty->print_cr("ScopeDesc(pc=" PTR_FORMAT " offset=%x):", pd->real_pc(_code), pd->pc_offset());
  }

  print_value_on(st);
  // decode offsets
  if (WizardMode) {
    st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->content_begin());
    st->print_cr(" offset:     %d",    _decode_offset);
    st->print_cr(" bci:        %d",    bci());
    st->print_cr(" reexecute:  %s",    should_reexecute() ? "true" : "false");
    st->print_cr(" locals:     %d",    _locals_decode_offset);
    st->print_cr(" stack:      %d",    _expressions_decode_offset);
    st->print_cr(" monitor:    %d",    _monitors_decode_offset);
    st->print_cr(" sender:     %d",    _sender_decode_offset);
  }
  // locals
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      tty->print_cr("   Locals");
      for (int index = 0; index < l->length(); index++) {
        st->print("    - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr("   Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print("   - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  { GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr("   Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print("    - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }

#ifdef COMPILER2
  if (DoEscapeAnalysis && is_top() && _objects != NULL) {
    tty->print_cr("Objects");
    for (int i = 0; i < _objects->length(); i++) {
      ObjectValue* sv = (ObjectValue*) _objects->at(i);
      tty->print(" - %d: ", sv->id());
      sv->print_fields_on(tty);
      tty->cr();
    }
  }
#endif // COMPILER2
}

#endif

void ScopeDesc::verify() {
  ResourceMark rm;
  guarantee(method()->is_method(), "type check");

  // check if we have any illegal elements on the expression stack
  { GrowableArray<ScopeValue*>* l = expressions();
    if (l != NULL) {
      for (int index = 0; index < l->length(); index++) {
       //guarantee(!l->at(index)->is_illegal(), "expression element cannot be illegal");
      }
    }
  }
}
C:\hotspot-69087d08d473\src\share\vm/code/scopeDesc.hpp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_SCOPEDESC_HPP
#define SHARE_VM_CODE_SCOPEDESC_HPP

#include "code/debugInfo.hpp"
#include "code/pcDesc.hpp"
#include "oops/method.hpp"
#include "utilities/growableArray.hpp"

// SimpleScopeDesc is used when all you need to extract from
// a given (pc, nmethod) pair is a Method* and a bci. This is
// quite a bit faster than allocating a full ScopeDesc, but
// very limited in abilities.

class SimpleScopeDesc : public StackObj {
 private:
  Method* _method;
  int _bci;

 public:
  SimpleScopeDesc(nmethod* code, address pc) {
    PcDesc* pc_desc = code->pc_desc_at(pc);
    assert(pc_desc != NULL, "Must be able to find matching PcDesc");
    DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
    int ignore_sender = buffer.read_int();
    _method           = buffer.read_method();
    _bci              = buffer.read_bci();
  }

  Method* method() { return _method; }
  int bci() { return _bci; }
};
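
// Quick lookup (illustrative sketch; assumes `nm` is an nmethod containing `pc`):
//
//   SimpleScopeDesc ssd(nm, pc);
//   Method* m = ssd.method();
//   int bci   = ssd.bci();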

// ScopeDescs contain the information that makes source-level debugging of
// nmethods possible; each scopeDesc describes a method activation

class ScopeDesc : public ResourceObj {
 public:
  // Constructor
  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool return_oop);

  // Calls above, giving default value of "serialized_null" to the
  // "obj_decode_offset" argument.  (We don't use a default argument to
  // avoid a .hpp-.hpp dependency.)
  ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop);

  // JVM state
  Method* method()      const { return _method; }
  int          bci()      const { return _bci;    }
  bool should_reexecute() const { return _reexecute; }
  bool return_oop()       const { return _return_oop; }

  GrowableArray<ScopeValue*>*   locals();
  GrowableArray<ScopeValue*>*   expressions();
  GrowableArray<MonitorValue*>* monitors();
  GrowableArray<ScopeValue*>*   objects();

  // Stack walking; returns NULL if this is the outermost scope.
  ScopeDesc* sender() const;

  // Returns where the scope was decoded
  int decode_offset() const { return _decode_offset; }

  // Tells whether sender() returns NULL
  bool is_top() const;
  // Tells whether sd is equal to this
  bool is_equal(ScopeDesc* sd) const;

 private:
  // Alternative constructor
  ScopeDesc(const ScopeDesc* parent);

  // JVM state
  Method*       _method;
  int           _bci;
  bool          _reexecute;
  bool          _return_oop;

  // Decoding offsets
  int _decode_offset;
  int _sender_decode_offset;
  int _locals_decode_offset;
  int _expressions_decode_offset;
  int _monitors_decode_offset;

  // Object pool
  GrowableArray<ScopeValue*>* _objects;

  // Nmethod information
  const nmethod* _code;

  // Decoding operations
  void decode_body();
  GrowableArray<ScopeValue*>* decode_scope_values(int decode_offset);
  GrowableArray<MonitorValue*>* decode_monitor_values(int decode_offset);
  GrowableArray<ScopeValue*>* decode_object_values(int decode_offset);

  DebugInfoReadStream* stream_at(int decode_offset) const;


 public:
  // Verification
  void verify();

#ifndef PRODUCT
 public:
  // Printing support
  void print_on(outputStream* st) const;
  void print_on(outputStream* st, PcDesc* pd) const;
  void print_value_on(outputStream* st) const;
#endif
};

#endif // SHARE_VM_CODE_SCOPEDESC_HPP
C:\hotspot-69087d08d473\src\share\vm/code/stubs.cpp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/stubs.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"


// Implementation of StubQueue
//
// Standard wrap-around queue implementation; the queue dimensions
// are specified by the _queue_begin & _queue_end indices. The queue
// can be in two states (transparent to the outside):
//
// a) contiguous state: all queue entries in one block (or empty)
//
// Queue: |...|XXXXXXX|...............|
//        ^0  ^begin  ^end            ^size = limit
//            |_______|
//            one block
//
// b) non-contiguous state: queue entries in two blocks
//
// Queue: |XXX|.......|XXXXXXX|.......|
//        ^0  ^end    ^begin  ^limit  ^size
//        |___|       |_______|
//         1st block  2nd block
//
// In the non-contiguous state, the wrap-around point is
// indicated via the _buffer_limit index, since the last
// queue entry may not fill up the queue completely; in
// that case we need to know where the 2nd block ends in
// order to wrap around properly. When removing the last
// entry of the 2nd block, _buffer_limit is reset to
// _buffer_size.
//
// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
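
// Typical allocation pattern (illustrative sketch; `queue` is a StubQueue*):
//
//   Stub* s = queue->request(code_size);    // returns with the queue locked
//   if (s != NULL) {
//     // ... emit at most code_size bytes of stub code ...
//     CodeStrings strings;
//     queue->commit(actual_code_size, strings);  // unlocks the queue
//   }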


StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, size);
  if( blob == NULL) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name));
  }
  _stub_interface  = stub_interface;
  _buffer_size     = blob->content_size();
  _buffer_limit    = blob->content_size();
  _stub_buffer     = blob->content_begin();
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  register_queue(this);
}


StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed so nothing needs to be done here.
  //       If we want to implement the destructor, we need to release the BufferBlob
  //       allocated in the constructor (i.e., we need to keep it around or look it
  //       up via CodeCache::find_blob(...).
  Unimplemented();
}


Stub* StubQueue::stub_containing(address pc) const {
  if (contains(pc)) {
    for (Stub* s = first(); s != NULL; s = next(s)) {
      if (stub_contains(s, pc)) return s;
    }
  }
  return NULL;
}


Stub* StubQueue::request_committed(int code_size) {
  Stub* s = request(code_size);
  CodeStrings strings;
  if (s != NULL) commit(code_size, strings);
  return s;
}


Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock();
  Stub* s = current_stub();
  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        CodeStrings strings;
        stub_initialize(s, requested_size, strings);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    s = current_stub();
    CodeStrings strings;
    stub_initialize(s, requested_size, strings);
    return s;
  }
  // Not enough space left
  if (_mutex != NULL) _mutex->unlock();
  return NULL;
}


void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
  Stub* s = current_stub();
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  stub_initialize(s, committed_size, strings);
  _queue_end += committed_size;
  _number_of_stubs++;
  if (_mutex != NULL) _mutex->unlock();
  debug_only(stub_verify(s);)
}


void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin  = 0;
    _queue_end    = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    _buffer_limit = _buffer_size;
    _queue_begin = 0;
  }
  _number_of_stubs--;
}


void StubQueue::remove_first(int n) {
  int i = MIN2(n, number_of_stubs());
  while (i-- > 0) remove_first();
}


void StubQueue::remove_all(){
  debug_only(verify();)
  remove_first(number_of_stubs());
  assert(number_of_stubs() == 0, "sanity check");
}


enum { StubQueueLimit = 10 };  // there are only a few in the world
static StubQueue* registered_stub_queues[StubQueueLimit];

void StubQueue::register_queue(StubQueue* sq) {
  for (int i = 0; i < StubQueueLimit; i++) {
    if (registered_stub_queues[i] == NULL) {
      registered_stub_queues[i] = sq;
      return;
    }
  }
  ShouldNotReachHere();
}


void StubQueue::queues_do(void f(StubQueue* sq)) {
  for (int i = 0; i < StubQueueLimit; i++) {
    if (registered_stub_queues[i] != NULL) {
      f(registered_stub_queues[i]);
    }
  }
}


void StubQueue::stubs_do(void f(Stub* s)) {
  debug_only(verify();)
  MutexLockerEx lock(_mutex);
  for (Stub* s = first(); s != NULL; s = next(s)) f(s);
}


void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == NULL) return;
  MutexLockerEx lock(_mutex);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "buffer size must be positive");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
  guarantee(0 <= _queue_begin  && _queue_begin  <  _buffer_limit, "_queue_begin out of bounds");
  guarantee(0 <= _queue_end    && _queue_end    <= _buffer_limit, "_queue_end   out of bounds");
  // verify alignment
  guarantee(_buffer_size  % CodeEntryAlignment == 0, "_buffer_size  not aligned");
  guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
  guarantee(_queue_begin  % CodeEntryAlignment == 0, "_queue_begin  not aligned");
  guarantee(_queue_end    % CodeEntryAlignment == 0, "_queue_end    not aligned");
  // verify buffer limit/size relationship
  if (is_contiguous()) {
    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
  }
  // verify contents
  int n = 0;
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_verify(s);
    n++;
  }
  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
  guarantee(_queue_begin != _queue_end || n == 0, "begin and end indices must differ unless the queue is empty");
}
