java层Throwable->nativeFillInStackTrace分析

背景:

1.研究java层堆栈获取原理
2.进一步提升自己对art的理解
本文选择Throwable->nativeFillInStackTrace这个切面入手

源码lineage-18.1

admin@C02D7132MD6R art % grep nativeFillInStackTrace -rn *
runtime/native/java_lang_Throwable.cc:28:static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) {
runtime/native/java_lang_Throwable.cc:42:  FAST_NATIVE_METHOD(Throwable, nativeFillInStackTrace, "()Ljava/lang/Object;"),

关注点,native函数

//runtime/native/java_lang_Throwable.cc
static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) {
  // Fast-native entry point: capture the current thread's stack into an
  // opaque internal trace object and return it to Java as a local reference.
  ScopedFastNativeObjectAccess soa(env);
  return soa.Self()->CreateInternalStackTrace<false>(soa);
}

// Decode the opaque state produced by nativeFillInStackTrace into a
// java.lang.StackTraceElement[] array.
static jobjectArray Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) {
  if (javaStackState == nullptr) {
      return nullptr;  // No captured state -> no trace to decode.
  }
  ScopedFastNativeObjectAccess soa(env);
  return Thread::InternalStackTraceToStackTraceElementArray(soa, javaStackState);
}

static JNINativeMethod gMethods[] = {
  // @FastNative bindings: no runnable<->native thread-state transition on entry.
  FAST_NATIVE_METHOD(Throwable, nativeFillInStackTrace, "()Ljava/lang/Object;"),
  FAST_NATIVE_METHOD(Throwable, nativeGetStackTrace, "(Ljava/lang/Object;)[Ljava/lang/StackTraceElement;"),
};

// Registers the gMethods table above against java/lang/Throwable.
void register_java_lang_Throwable(JNIEnv* env) {
  REGISTER_NATIVE_METHODS("java/lang/Throwable");
}

ScopedFastNativeObjectAccess声明

//runtime/native/scoped_fast_native_object_access.h
// Variant of ScopedObjectAccess that does no runnable transitions. Should only be used by "fast"
// JNI methods.
// (i.e. a ScopedObjectAccess without a thread-state switch; for "fast" JNI only.)
class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
 public:
  // The constructor (see the -inl.h file) only asserts preconditions; the
  // thread must already hold the mutator lock in shared mode.
  ALWAYS_INLINE explicit ScopedFastNativeObjectAccess(JNIEnv* env)
    REQUIRES(!Locks::thread_suspend_count_lock_)
    SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  ALWAYS_INLINE ~ScopedFastNativeObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(ScopedFastNativeObjectAccess);
};

ScopedObjectAccessAlreadyRunnable声明

// Base scope object: assumes the thread is already in the Runnable state and
// caches the thread / env / vm triple derived from the JNIEnv.
class ScopedObjectAccessAlreadyRunnable : public ValueObject {
 public:
  // The Thread owning the wrapped JNIEnv (cached at construction time).
  Thread* Self() const {
    return self_;
  }

  JNIEnvExt* Env() const {
    return env_;
  }

  JavaVMExt* Vm() const {
    return vm_;
  }

  bool ForceCopy() const;
  // (Declaration continues in the real source; excerpt truncated here.)

ScopedFastNativeObjectAccess构造函数

//runtime/native/scoped_fast_native_object_access-inl.h
inline ScopedFastNativeObjectAccess::ScopedFastNativeObjectAccess(JNIEnv* env)
    : ScopedObjectAccessAlreadyRunnable(env) {
    
  // The rest are debug-only precondition checks; not relevant to the
  // stack-trace flow analyzed in this article.
  Locks::mutator_lock_->AssertSharedHeld(Self());
  DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
  // Don't work with raw objects in non-runnable states.
  DCHECK_EQ(Self()->GetState(), kRunnable);
}

ScopedObjectAccessAlreadyRunnable构造函数:

// Caches the owning Thread*, the JNIEnvExt, and its JavaVMExt from the env.
inline ScopedObjectAccessAlreadyRunnable::ScopedObjectAccessAlreadyRunnable(JNIEnv* env)
    : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->GetVm()) {}

ThreadForEnv函数:

//runtime/thread-inl.h

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  // Every JNIEnv handed to native code is actually a JNIEnvExt, which caches
  // the Thread* it belongs to, so no TLS lookup is needed.
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->GetSelf();
}

down_cast声明:

//libartbase/base/casts.h(down_cast 的定义在此文件;code_generator_arm64.cc 只是 grep 命中的一个使用处)
template<typename To, typename From>     // use like this: down_cast<T*>(foo);
inline To down_cast(From* f) {                   // so we only accept pointers
  // Compile-time guard: the target type must derive from the source type,
  // so the downcast is known to be within the same hierarchy.
  static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
                "down_cast unsafe as To is not a subtype of From");

  return static_cast<To>(f);
}

template<typename To, typename From>     // use like this: down_cast<T&>(foo);
inline To down_cast(From& f) {           // so we only accept references
  static_assert(std::is_base_of<From, typename std::remove_reference<To>::type>::value,
                "down_cast unsafe as To is not a subtype of From");

  return static_cast<To>(f);
}

JNIEnvExt类:

class JNIEnvExt : public JNIEnv {
 public:
  static JNIEnvExt* Create(Thread* self, JavaVMExt* vm, std::string* error_msg);
  static Offset SegmentStateOffset(size_t pointer_size);
  static Offset LocalRefCookieOffset(size_t pointer_size);
  static Offset SelfOffset(size_t pointer_size);
  static jint GetEnvHandler(JavaVMExt* vm, /*out*/void** out, jint version);
  ~JNIEnvExt();
  void SetCheckJniEnabled(bool enabled) REQUIRES(!Locks::jni_function_table_lock_);
  // Local-reference frame push/pop.
  void PushFrame(int capacity) REQUIRES_SHARED(Locks::mutator_lock_);
  void PopFrame() REQUIRES_SHARED(Locks::mutator_lock_);

  template<typename T>
  T AddLocalReference(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void UpdateLocal(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    locals_.Update(iref, obj);
  }
  
  // The accessor we care about here: returns the cached owning thread.
  Thread* GetSelf() const { return self_; }
  
  // Link to Thread::Current().
  Thread* const self_;
  // (Declaration continues in the real source; excerpt truncated here.)

再回到最开始关注的点:

// (Repeated from above for reference.) soa.Self() yields the Thread cached
// from the JNIEnvExt; the real work happens in CreateInternalStackTrace.
static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) {
  ScopedFastNativeObjectAccess soa(env);
  return soa.Self()->CreateInternalStackTrace<false>(soa);
}

soa.Self()函数返回的是Thread*,该Thread是在构造ScopedObjectAccessAlreadyRunnable时通过ThreadForEnv函数获取的。

Thread类的声明

admin@C02D7132MD6R art % grep "class Thread {" -rn *
grep: libnativebridge/.clang-format: No such file or directory
grep: libnativeloader/.clang-format: No such file or directory
runtime/thread.h:171:class Thread {
//runtime/thread.h
class Thread {
 public:
  static const size_t kStackOverflowImplicitCheckSize;
  static constexpr bool kVerifyStack = kIsDebugBuild;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
  // (Remaining members elided in this excerpt.)
  ...
  ...
  ...
}
//runtime/thread.cc
template<bool kTransactionActive>
jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
  // First pass: count frames, and opportunistically record them so a second
  // full walk can be avoided.
  // Compute depth of stack, save frames if possible to avoid needing to recompute many.
  constexpr size_t kMaxSavedFrames = 256;
  std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
  FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
                                       &saved_frames[0],
                                       kMaxSavedFrames);
  count_visitor.WalkStack();
  const uint32_t depth = count_visitor.GetDepth();
  const uint32_t skip_depth = count_visitor.GetSkipDepth();

  // Build internal stack trace.
  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
                                                                         const_cast<Thread*>(this),
                                                                         skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return nullptr;  // Allocation failed.
  }
  // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
  // than doing the stack walk twice.
  if (depth < kMaxSavedFrames) {
    // No need to walk the stack again; append the frames saved in pass one.
    for (size_t i = 0; i < depth; ++i) {
      build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
    }
  } else {
    build_trace_visitor.WalkStack();
  }

  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
  if (kIsDebugBuild) {
    ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
    // Second half of trace_methods is dex PCs.
    for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
      auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
      CHECK(method != nullptr);
    }
  }
  // Hand the internal trace back to Java as a local reference.
  return soa.AddLocalReference<jobject>(trace);
}

FetchStackTraceVisitor声明

//runtime/thread.cc
// First-pass visitor: counts Java frames and records up to max_saved_frames
// (ArtMethod*, dex pc) pairs so the caller can skip a second stack walk.
class FetchStackTraceVisitor : public StackVisitor {
 public:
  explicit FetchStackTraceVisitor(Thread* thread,
                                  ArtMethodDexPcPair* saved_frames = nullptr,
                                  size_t max_saved_frames = 0)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        saved_frames_(saved_frames),
        max_saved_frames_(max_saved_frames) {}

  // Invoked by WalkStack() for each frame; returning true keeps walking.
  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame)
    ArtMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !GetClassRoot<mirror::Throwable>()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        if (depth_ < max_saved_frames_) {
          // Record the frame so CreateInternalStackTrace can reuse it.
          saved_frames_[depth_].first = m;
          saved_frames_[depth_].second = m->IsProxyMethod() ? dex::kDexNoIndex : GetDexPc();
        }
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  uint32_t GetDepth() const {
    return depth_;
  }

  uint32_t GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_ = 0;
  uint32_t skip_depth_ = 0;
  bool skipping_ = true;  // True until the Throwable-constructor frames pass.
  ArtMethodDexPcPair* saved_frames_;
  const size_t max_saved_frames_;

  DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
};

StackVisitor声明

//runtime/stack.h
// Abstract base for stack walks; subclasses implement VisitFrame().
class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}
  StackVisitor(const StackVisitor&) = default;
  StackVisitor(StackVisitor&&) = default;

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  enum class CountTransitions {
    kYes,
    kNo,
  };
  // (Declaration continues in the real source; excerpt truncated here.)

WalkStack函数会调用纯虚函数VisitFrame

VisitFrame返回true表示继续遍历,返回false则表示WalkStack结束。

framework中使用StackVisitor的地方还有很多,比如hiddenapi AccessContext查找,很多绕过hiddenapi的方案都可能要研究一下这里

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值