Android Misc 4: ART-Related Study Notes

1. Code Analysis

1.1. The Java Environment Startup Process

1.1.1. Starting from zygote

AppRuntime runtime(argv[0], computeArgBlockSize(argc, argv));
runtime.start("com.android.internal.os.ZygoteInit", args, zygote);
class AppRuntime : public AndroidRuntime
{
public:
    ...
};

AndroidRuntime.cpp
frameworks/base/core/jni

void AndroidRuntime::start(const char* className, const Vector<String8>& options, bool zygote)
{
    ....
    JniInvocation jni_invocation;
    jni_invocation.Init(NULL);
    JNIEnv* env;
    if (startVm(&mJavaVM, &env, zygote) != 0) {
        return;
    }
    onVmCreated(env);

  ...
    char* slashClassName = toSlashClassName(className);
    jclass startClass = env->FindClass(slashClassName);
    if (startClass == NULL) {
...
    } else {
        jmethodID startMeth = env->GetStaticMethodID(startClass, "main",
            "([Ljava/lang/String;)V");
        if (startMeth == NULL) {
            ALOGE("JavaVM unable to find main() in '%s'\n", className);
            /* keep going */
        } else {
            env->CallStaticVoidMethod(startClass, startMeth, strArray);

....
        }
    }
...
}
  • First, look at what gets initialized inside JniInvocation.
  • Then look at the initialization of JavaVM and JNIEnv, i.e. how the Java environment is set up.
  • Then look at how JNI_CreateJavaVM sets up the runtime, the oat files, and so on.
  • Finally, look at how FindClass locates our ZygoteInit class, as well as how native code is linked and how the call is made.

1.1.2. The initialization done inside JniInvocation

JniInvocation jni_invocation;
jni_invocation.Init(NULL);

JniInvocation.cpp
libnativehelper

JniInvocation::JniInvocation() :
    handle_(NULL),
    JNI_GetDefaultJavaVMInitArgs_(NULL),
    JNI_CreateJavaVM_(NULL),
    JNI_GetCreatedJavaVMs_(NULL) {

  LOG_ALWAYS_FATAL_IF(jni_invocation_ != NULL, "JniInvocation instance already initialized");
  jni_invocation_ = this;
}

The constructor does not do much.

bool JniInvocation::Init(const char* library) {

  const int kDlopenFlags = RTLD_NOW | RTLD_NODELETE;
  library = GetLibrary(library, buffer);
  handle_ = dlopen(library, kDlopenFlags);
  if (!FindSymbol(reinterpret_cast<void**>(&JNI_GetDefaultJavaVMInitArgs_),
                  "JNI_GetDefaultJavaVMInitArgs")) {
    return false;
  }
  if (!FindSymbol(reinterpret_cast<void**>(&JNI_CreateJavaVM_),
                  "JNI_CreateJavaVM")) {
    return false;
  }
  if (!FindSymbol(reinterpret_cast<void**>(&JNI_GetCreatedJavaVMs_),
                  "JNI_GetCreatedJavaVMs")) {
    return false;
  }
  return true;
}

So it is actually quite simple: it obtains the library name, which is "libart.so",
then dlopen()s that .so and resolves the following three functions from its symbol table (sketched right after the list):

  • JNI_GetDefaultJavaVMInitArgs
  • JNI_CreateJavaVM
  • JNI_GetCreatedJavaVMs
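
FindSymbol is presumably just a thin wrapper around dlsym. As a rough, self-contained sketch of what JniInvocation::Init boils down to (illustrative code, not the real libnativehelper implementation), resolving those three entry points looks like this:

#include <dlfcn.h>
#include <cstdio>
#include <jni.h>

// Function-pointer types matching the three JNI invocation entry points.
using CreateJavaVM_t   = jint (*)(JavaVM**, JNIEnv**, void*);
using GetDefaultArgs_t = jint (*)(void*);
using GetCreatedVMs_t  = jint (*)(JavaVM**, jsize, jsize*);

int main() {
  // RTLD_NODELETE keeps libart.so mapped even if dlclose() is called later.
  void* handle = dlopen("libart.so", RTLD_NOW | RTLD_NODELETE);
  if (handle == nullptr) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  auto create_vm    = reinterpret_cast<CreateJavaVM_t>(dlsym(handle, "JNI_CreateJavaVM"));
  auto default_args = reinterpret_cast<GetDefaultArgs_t>(dlsym(handle, "JNI_GetDefaultJavaVMInitArgs"));
  auto created_vms  = reinterpret_cast<GetCreatedVMs_t>(dlsym(handle, "JNI_GetCreatedJavaVMs"));
  if (create_vm == nullptr || default_args == nullptr || created_vms == nullptr) {
    fprintf(stderr, "dlsym failed: %s\n", dlerror());
    return 1;
  }
  // From here on, the global JNI_CreateJavaVM() etc. that JniInvocation exposes
  // simply forward to these resolved pointers.
  return 0;
}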

1.1.3. Initialization of JavaVM and JNIEnv: setting up the Java environment

JNIEnv* env;
JavaVM* mJavaVM;
if (startVm(&mJavaVM, &env, zygote) != 0) {
    return;
}

First, let's take a look at these two structures.

typedef _JavaVM JavaVM;
struct _JavaVM {
    const struct JNIInvokeInterface* functions;

#if defined(__cplusplus)
    jint DestroyJavaVM()
    { return functions->DestroyJavaVM(this); }
    jint AttachCurrentThread(JNIEnv** p_env, void* thr_args)
    { return functions->AttachCurrentThread(this, p_env, thr_args); }
    jint DetachCurrentThread()
    { return functions->DetachCurrentThread(this); }
    jint GetEnv(void** env, jint version)
    { return functions->GetEnv(this, env, version); }
    jint AttachCurrentThreadAsDaemon(JNIEnv** p_env, void* thr_args)
    { return functions->AttachCurrentThreadAsDaemon(this, p_env, thr_args); }
#endif /*__cplusplus*/
};

It is essentially an interface table; the main thing to see is how functions gets assigned, after which the upper layers can use it.

typedef _JNIEnv JNIEnv;
struct _JNIEnv {
    /* do not rename this; it does not seem to be entirely opaque */
    const struct JNINativeInterface* functions;

#if defined(__cplusplus)

    jint GetVersion()
    { return functions->GetVersion(this); }

    jclass DefineClass(const char *name, jobject loader, const jbyte* buf,
        jsize bufLen)
    { return functions->DefineClass(this, name, loader, buf, bufLen); }

    jclass FindClass(const char* name)
    { return functions->FindClass(this, name); }
...
}

JNIEnv follows the same pattern. Next, let's see how these two objects get initialized.

int AndroidRuntime::startVm(JavaVM** pJavaVM, JNIEnv** pEnv, bool zygote)
{
    ...
    if (JNI_CreateJavaVM(pJavaVM, pEnv, &initArgs) < 0) {
        ALOGE("JNI_CreateJavaVM failed\n");
        return -1;
    }

    return 0;
}

This call eventually lands inside ART, through the function pointer that was resolved from the .so earlier:
art/runtime/java_vm_ext.cc

extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
....
 if (!Runtime::Create(options, ignore_unrecognized)) {
    return JNI_ERR;
  }
  Runtime* runtime = Runtime::Current();
  bool started = runtime->Start();
....
  *p_env = Thread::Current()->GetJniEnv();
  *p_vm = runtime->GetJavaVM();
  return JNI_OK;
}

It creates a Runtime, starts it, and in the end the two pointers we care about both get assigned.
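
startVm elides how initArgs is built. For reference, the generic JNI invocation contract it fulfils looks roughly like this (a hedged sketch; the real AndroidRuntime builds a much longer option list with heap sizes, the boot classpath, compiler flags, and so on):

#include <jni.h>

static int CreateVmSketch(JavaVM** out_vm, JNIEnv** out_env) {
  JavaVMOption options[1];
  options[0].optionString = const_cast<char*>("-verbose:jni");  // example option only

  JavaVMInitArgs init_args;
  init_args.version = JNI_VERSION_1_6;
  init_args.nOptions = 1;
  init_args.options = options;
  init_args.ignoreUnrecognized = JNI_FALSE;

  // This dispatches through the JNI_CreateJavaVM_ pointer resolved from libart.so.
  return JNI_CreateJavaVM(out_vm, out_env, &init_args) == JNI_OK ? 0 : -1;
}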

bool Runtime::Create(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
  RuntimeArgumentMap runtime_options;
  return ParseOptions(raw_options, ignore_unrecognized, &runtime_options) &&
      Create(std::move(runtime_options));
}
bool Runtime::Create(RuntimeArgumentMap&& runtime_options) {
....
  instance_ = new Runtime;
  if (!instance_->Init(std::move(runtime_options))) {
    instance_ = nullptr;
    return false;
  }
  return true;
}

Create itself is unremarkable: singleton instance pointer and so on, followed by the Init function.

bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
  RuntimeArgumentMap runtime_options(std::move(runtime_options_in));
  ScopedTrace trace(__FUNCTION__);
  CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);

  MemMap::Init();

 ...

  oat_file_manager_ = new OatFileManager;

 ...
  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
 ...
  heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
      ...
      runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
           runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));

  ...

  java_vm_ = new JavaVMExt(this, runtime_options);
    Thread::Startup();


  Thread* self = Thread::Attach("main", false, nullptr, false);
  ...
  class_linker_ = new ClassLinker(intern_table_);
  ...
      OpenDexFiles(dex_filenames,
                   dex_locations,
                   runtime_options.GetOrDefault(Opt::Image),
                   &boot_class_path);
   ...

There is a lot going on in here, including the creation of the Heap, the ClassLinker, and more; we will get to those in the next sections.
The line we care about, java_vm_ = new JavaVMExt(this, runtime_options);, is plainly visible.
What about the other one, the JNIEnv?

It is actually created inside Thread::Attach.

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                       bool create_peer) {
  Runtime* runtime = Runtime::Current();
...
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
...
  }

If the Thread does not exist yet, a new one is created and then Init is called.

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
 ...
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
    if (tlsPtr_.jni_env == nullptr) {
      return false;
    }
  }

...
  return true;
}

Good, now jni_env exists as well.
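
In other words, every attached thread owns its own JNIEnv (a JNIEnvExt underneath), created along the Thread::Attach / Thread::Init path above. From the native side, the standard way a thread obtains its JNIEnv is through the JavaVM, as in this small sketch:

#include <jni.h>

JNIEnv* GetEnvForThisThread(JavaVM* vm) {
  JNIEnv* env = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) == JNI_OK) {
    return env;  // this thread is already attached and has a JNIEnvExt
  }
  // Attaching runs the same Thread::Attach path described above.
  if (vm->AttachCurrentThread(&env, nullptr) != JNI_OK) {
    return nullptr;
  }
  return env;  // call vm->DetachCurrentThread() before the thread exits
}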

Now let's summarize with an example. When JNIEnv is used to call FindClass, does the call end up in JNIEnvExt directly? Not exactly; it goes through functions->:

struct _JNIEnv {
    const struct JNINativeInterface* functions;

#if defined(__cplusplus)

    jint GetVersion()
    { return functions->GetVersion(this); }

    jclass FindClass(const char* name)
    { return functions->FindClass(this, name); }

So we have to keep digging.

struct JNIEnvExt : public JNIEnv {
}

JNIEnvExt inherits from JNIEnv, so we just need to find where functions is assigned in the .cc file.

JNIEnvExt* JNIEnvExt::Create(Thread* self_in, JavaVMExt* vm_in) {
  std::unique_ptr<JNIEnvExt> ret(new JNIEnvExt(self_in, vm_in));
  if (CheckLocalsValid(ret.get())) {
    return ret.release();
  }
  return nullptr;
}

JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
    : self(self_in),
...
      monitors("monitors", kMonitorsInitial, kMonitorsMax) {
  functions = unchecked_functions = GetJniNativeInterface();
...
}

It is assigned in the constructor.

const JNINativeInterface* GetJniNativeInterface() {
  return &gJniNativeInterface;
}
const JNINativeInterface gJniNativeInterface = {
  nullptr,  // reserved0.
  nullptr,  // reserved1.
  nullptr,  // reserved2.
  nullptr,  // reserved3.
  JNI::GetVersion,
  JNI::DefineClass,
  JNI::FindClass,
  JNI::FromReflectedMethod,
  JNI::FromReflectedField,
  JNI::ToReflectedMethod,
  JNI::GetSuperclass,
  JNI::IsAssignableFrom,
  JNI::ToReflectedField,
  JNI::Throw,
  JNI::ThrowNew,
  ...
};
art/runtime/jni_internal.cc

OK, so everything is ultimately defined in this global table. From now on, when the upper layers call FindClass, the call comes straight to this file and starts from this global variable.
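
To make the dispatch concrete: the inline wrappers in _JNIEnv just forward into this table, so the two calls in the small snippet below are equivalent, and both end up in JNI::FindClass in jni_internal.cc (illustrative code, not ART source):

#include <jni.h>

jclass FindZygoteInit(JNIEnv* env) {
  // C++ style: the inline wrapper in _JNIEnv...
  jclass c1 = env->FindClass("com/android/internal/os/ZygoteInit");
  // ...expands to an explicit lookup in the function table.
  jclass c2 = env->functions->FindClass(env, "com/android/internal/os/ZygoteInit");
  return c1 != nullptr ? c1 : c2;
}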

1.1.4. JNI_CreateJavaVM: loading the oat files for the runtime

We have come down the path

startVm -> JNI_CreateJavaVM -> Runtime::Create -> Runtime::Init

Now let's see how Init builds the heap and loads the oat files.

bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
 heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
 .
 .
 }

This is where the heap gets created.
/art/runtime/gc/heap.cc

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           ....
{
               
uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
    for (size_t index = 0; index < image_file_names.size(); ++index) {
      std::string& image_name = image_file_names[index];
      std::string error_msg;
      space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage(
          image_name.c_str(),
          image_instruction_set,
          index > 0,
          &error_msg);
      if (boot_image_space != nullptr) {
        AddSpace(boot_image_space);
        ...

        if (index == 0) {
...
          const OatFile* boot_oat_file = boot_image_space->GetOatFile();
          if (boot_oat_file == nullptr) {
            continue;
          }

          const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
          const char* boot_classpath =
              boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
          if (boot_classpath == nullptr) {
            continue;
          }

          space::ImageSpace::CreateMultiImageLocations(image_file_name,
                                                       boot_classpath,
                                                       &image_file_names);
        }
      } else {
....
      }
    }
  }
    
    
}

For now I only care about this part:

CreateBootImage
ImageSpace* ImageSpace::CreateBootImage(const char* image_location,
                                        const InstructionSet image_isa,
                                        bool secondary_image,
                                        std::string* error_msg) {
  ScopedTrace trace(__FUNCTION__);
  std::string system_filename;
  bool has_system = false;
  std::string cache_filename;
  bool has_cache = false;
  bool dalvik_cache_exists = false;
  bool is_global_cache = true;
  bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
                                       &has_system, &cache_filename, &dalvik_cache_exists,
                                       &has_cache, &is_global_cache);

This first locates the boot.art file; see FindImageFilename for the details of how it is found.

ImageSpace* space;
       space = ImageSpace::Init(image_filename->c_str(),
                               image_location,
                               !(is_system || relocated_version_used),
                               /* oat_file */nullptr,
                               error_msg);
ImageSpace* ImageSpace::Init(const char* image_filename,
                             const char* image_location,
                             ...) {
ImageHeader temp_image_header;
  ImageHeader* image_header = &temp_image_header;
  {
    TimingLogger::ScopedTiming timing("ReadImageHeader", &logger);
    bool success = file->ReadFully(image_header, sizeof(*image_header));
    if (!success || !image_header->IsValid()) {
      *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
      return nullptr;
    }
  }

Inside Init, the header of the boot.art file is read first. It records the oat begin/end addresses as well as the oatdata and oatexec sections, i.e. where the data and the executable code are located.

std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                   image_location,
                                                   map.release(),
                                                   bitmap.release(),
                                                   image_end));


  if (oat_file == nullptr) {
    TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
    space->oat_file_.reset(space->OpenOatFile(image_filename, error_msg));

Then a new ImageSpace is created and OpenOatFile is called.

OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg) const {
  const ImageHeader& image_header = GetImageHeader();
  std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);

  CHECK(image_header.GetOatDataBegin() != nullptr);

  OatFile* oat_file = OatFile::Open(oat_filename,
                                    oat_filename,
                                    image_header.GetOatDataBegin(),
                                    image_header.GetOatFileBegin(),
....
                                    error_msg);
 ...
...

  return oat_file;
}

This is where the image_header information we just read is used.

OatFile* OatFile::Open(const std::string& filename,
                       const std::string& location,
                       uint8_t* requested_base,
                       uint8_t* oat_file_begin,
                       bool executable,
                       bool low_4gb,
                       const char* abs_dex_location,
                       std::string* error_msg) {
  ...
  OatFile* with_dlopen = OatFileBase::OpenOatFile<DlOpenOatFile>(filename,
                                                                location,
                                                                 requested_base,
                                                                 oat_file_begin,
                                                                 
     .....
  if (with_dlopen != nullptr) {
    return with_dlopen;
  }

The OpenOatFile here is a template, which is a bit convoluted:

template <typename kOatFileBaseSubType>
OatFileBase* OatFileBase::OpenOatFile(const std::string& elf_filename,
                                      const std::string& location,
                                      uint8_t* requested_base,
                                      uint8_t* oat_file_begin,
                                      bool writable,
                                      bool executable,
                                      bool low_4gb,
                                      const char* abs_dex_location,
                                      std::string* error_msg) {
  std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(location, executable));

  

  if (!ret->Load(elf_filename,
                 oat_file_begin,
                 writable,
                 executable,
                 low_4gb,
                 error_msg)) {
    return nullptr;
  }


  return ret.release();
}
bool DlOpenOatFile::Load(const std::string& elf_filename,
                         uint8_t* oat_file_begin,
    ....

  bool success = Dlopen(elf_filename, oat_file_begin, error_msg);
  DCHECK(dlopen_handle_ != nullptr || !success);

  return success;
}
bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
                           uint8_t* oat_file_begin,
                           ...) {
    if (oat_file_begin != nullptr) {                            //
      extinfo.flags |= ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS;     // Use the requested addr if
      extinfo.reserved_addr = oat_file_begin;                   // vaddr = 0.
    }                                                           //   (pic boot image).
    dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);

}

In the end the oat file is mapped at the oat begin address recorded in the image header.
With that, the loading is complete.
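
For reference, the loading step quoted above can be boiled down to the following sketch. It assumes the platform-internal android/dlext.h extension (android_dlopen_ext and the ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS flag, which is what this M/N-era code uses and which is version dependent), so treat it as illustrative only:

#include <android/dlext.h>  // android_dlopen_ext, android_dlextinfo
#include <dlfcn.h>
#include <cstdint>

void* MapOatAt(const char* oat_path, uint8_t* oat_file_begin) {
  android_dlextinfo extinfo = {};
  if (oat_file_begin != nullptr) {
    // Ask the linker to place the oat ELF at the address recorded in the
    // image header, so the pointers baked into boot.art stay valid.
    extinfo.flags |= ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS;
    extinfo.reserved_addr = oat_file_begin;
  }
  return android_dlopen_ext(oat_path, RTLD_NOW, &extinfo);
}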

1.1.5. JNI_CreateJavaVM: creating the runtime's ClassLinker

startVm -> JNI_CreateJavaVM -> Runtime::Create -> Runtime::Init

Creation of the ClassLinker:

bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
...
    intern_table_ = new InternTable;
  class_linker_ = new ClassLinker(intern_table_);
  if (GetHeap()->HasBootImageSpace()) {
    std::string error_msg;
    bool result = class_linker_->InitFromBootImage(&error_msg);
    ....
...
}

We start again from Runtime::Init. In the previous part we analyzed the loading of the oat files, but did not go into much detail on how the spaces are created; in short, when studying heap construction, the oat file loading and the image space setup are the parts worth reading carefully. Now let's look at how the ClassLinker is created.

ClassLinker::ClassLinker(InternTable* intern_table)
    // dex_lock_ is recursive as it may be used in stack dumping.
    : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
...
      intern_table_(intern_table),
      quick_resolution_trampoline_(nullptr),
      quick_imt_conflict_trampoline_(nullptr),
      quick_generic_jni_trampoline_(nullptr),
      quick_to_interpreter_bridge_trampoline_(nullptr),
      image_pointer_size_(sizeof(void*)) {
  CHECK(intern_table_ != nullptr);
  static_assert(kFindArrayCacheSize == arraysize(find_array_class_cache_),
                "Array cache size wrong.");
  std::fill_n(find_array_class_cache_, kFindArrayCacheSize, GcRoot<mirror::Class>(nullptr));
}

The constructor does not do very much.

if (GetHeap()->HasBootImageSpace()) {
    std::string error_msg;
    bool result = class_linker_->InitFromBootImage(&error_msg);

The heap was covered in the previous part, so the boot image space is certainly there. Let's see what InitFromBootImage does.

bool ClassLinker::InitFromBootImage(std::string* error_msg) {
  VLOG(startup) << __FUNCTION__ << " entering";
  CHECK(!init_done_);

  Runtime* const runtime = Runtime::Current();
  Thread* const self = Thread::Current();
  gc::Heap* const heap = runtime->GetHeap();
  std::vector<gc::space::ImageSpace*> spaces = heap->GetBootImageSpaces();
  CHECK(!spaces.empty());
  image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
  if (!ValidPointerSize(image_pointer_size_)) {
    *error_msg = StringPrintf("Invalid image pointer size: %zu", image_pointer_size_);
    return false;
  }

  dex_cache_boot_image_class_lookup_required_ = true;
  std::vector<const OatFile*> oat_files =
      runtime->GetOatFileManager().RegisterImageOatFiles(spaces);

  const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
  CHECK_EQ(default_oat_header.GetImageFileLocationOatChecksum(), 0U);
  CHECK_EQ(default_oat_header.GetImageFileLocationOatDataBegin(), 0U);
  const char* image_file_location = oat_files[0]->GetOatHeader().
      GetStoreValueByKey(OatHeader::kImageLocationKey);
  CHECK(image_file_location == nullptr || *image_file_location == 0);
  quick_resolution_trampoline_ = default_oat_header.GetQuickResolutionTrampoline();
  quick_imt_conflict_trampoline_ = default_oat_header.GetQuickImtConflictTrampoline();
  quick_generic_jni_trampoline_ = default_oat_header.GetQuickGenericJniTrampoline();
  quick_to_interpreter_bridge_trampoline_ = default_oat_header.GetQuickToInterpreterBridge();

  class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(
      down_cast<mirror::ObjectArray<mirror::Class>*>(
          spaces[0]->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)));
  mirror::Class::SetClassClass(class_roots_.Read()->Get(kJavaLangClass));

 
  mirror::String::SetClass(GetClassRoot(kJavaLangString));
  mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
  mirror::Field::SetArrayClass(GetClassRoot(kJavaLangReflectFieldArrayClass));
  mirror::Constructor::SetClass(GetClassRoot(kJavaLangReflectConstructor));
  mirror::Constructor::SetArrayClass(GetClassRoot(kJavaLangReflectConstructorArrayClass));
  mirror::Method::SetClass(GetClassRoot(kJavaLangReflectMethod));
  mirror::Method::SetArrayClass(GetClassRoot(kJavaLangReflectMethodArrayClass));
  mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
  mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
  mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
  mirror::CharArray::SetArrayClass(GetClassRoot(kCharArrayClass));
  mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
  mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
  mirror::IntArray::SetArrayClass(GetClassRoot(kIntArrayClass));
  mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
  mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
  mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
  mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));

  for (gc::space::ImageSpace* image_space : spaces) {
    // Boot class loader, use a null handle.
    std::vector<std::unique_ptr<const DexFile>> dex_files;
    if (!AddImageSpace(image_space,
                       ScopedNullHandle<mirror::ClassLoader>(),
                       /*dex_elements*/nullptr,
                       /*dex_location*/nullptr,
                       /*out*/&dex_files,
                       error_msg)) {
      return false;
    }
    boot_dex_files_.insert(boot_dex_files_.end(),
                           std::make_move_iterator(dex_files.begin()),
                           std::make_move_iterator(dex_files.end()));
  }

  return true;
}

This does a few things:

  • 1. Registers the oat files with the OatFileManager.
  • 2. Sets up many of the mirror:: root classes (the C++ classes that mirror core Java types); exactly why this is needed is not yet clear to me.
  • 3. Calls AddImageSpace for each boot image space.

1.2. Setting up the Java runtime environment

1.2.1. Starting from Runtime::Start

Let's come back to this function.

extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
  ScopedTrace trace(__FUNCTION__);
 ...
  bool ignore_unrecognized = args->ignoreUnrecognized;
  if (!Runtime::Create(options, ignore_unrecognized)) {
    return JNI_ERR;
  }
...

  Runtime* runtime = Runtime::Current();
  bool started = runtime->Start();
  *p_env = Thread::Current()->GetJniEnv();
  *p_vm = runtime->GetJavaVM();
  return JNI_OK;
}

We have in fact already analyzed much of Runtime::Create, which essentially means Runtime::Init. It does a lot, mainly including:

  • 1. Initialization of JavaVM and JNIEnv
  • 2. Loading of the oat files and construction of the whole heap
  • 3. Creation of the ClassLinker

Next, let's look at runtime->Start().

bool Runtime::Start() {

  Thread* self = Thread::Current();

  self->TransitionFromRunnableToSuspended(kNative);

  started_ = true;

  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
    std::string error_msg;
    if (!IsZygote()) {
      CreateJit();
    } else if (jit_options_->UseJitCompilation()) {

    }
  }

  {
    ScopedTrace trace2("InitNativeMethods");
    InitNativeMethods();
  }

  InitThreadGroups(self);

  system_class_loader_ = CreateSystemClassLoader(this);

  if (is_zygote_) {
    if (!InitZygote()) {
      return false;
    }
  } else {
    if (is_native_bridge_loaded_) {
      PreInitializeNativeBridge(".");
    }
    InitNonZygoteOrPostFork(self->GetJniEnv(),
                            /* is_system_server */ false,
                            action,
                            GetInstructionSetString(kRuntimeISA));
  }

  StartDaemonThreads();

  return true;
}

This function carries a lot of weight!

  • 1. Creation of the JIT
  • 2. InitNativeMethods: loading the native libraries that back the Java code
  • 3. Creation of the system class loader
  • 4. Creation of the daemon threads, the main threads that support garbage collection

1.2.2. Creating the JIT

This actually comes in two parts; let's enter from the first one and walk through how the JIT threads get created.

JNI_CreateJavaVM -> runtime->Start()

The Start function contains the following code.

if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
    std::string error_msg;
    if (!IsZygote()) {
    // If we are the zygote then we need to wait until after forking to create the code cache
    // due to SELinux restrictions on r/w/x memory regions.
      CreateJit();
    } else if (jit_options_->UseJitCompilation()) {
      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
        // Try to load compiler pre zygote to reduce PSS. b/27744947
        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
      }
    }
  }

If this is not the zygote, the JIT is created right here.

void Runtime::CreateJit() {
  CHECK(!IsAotCompiler());
  if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
    DCHECK(!jit_options_->UseJitCompilation());
  }
  std::string error_msg;
  jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
  if (jit_.get() == nullptr) {
    LOG(WARNING) << "Failed to create JIT " << error_msg;
  }
}

This calls Jit::Create.

Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
 ....
  jit->hot_method_threshold_ = options->GetCompileThreshold();
  jit->warm_method_threshold_ = options->GetWarmupThreshold();
  jit->osr_method_threshold_ = options->GetOsrThreshold();
  jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
  jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();

  jit->CreateThreadPool();

  // Notify native debugger about the classes already loaded before the creation of the jit.
  jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
  return jit.release();
}

After setting up some JIT compilation parameters and related state, it eventually calls jit->CreateThreadPool() to create the JIT thread.

void Jit::CreateThreadPool() {
...
  thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
  thread_pool_->StartWorkers(Thread::Current());
}

That completes the JIT setup on this path.
There is actually a second part, in another piece of code in Runtime::Start:

if (is_zygote_) {
    if (!InitZygote()) {
      return false;
    }
  } else {

    InitNonZygoteOrPostFork(self->GetJniEnv(),
                            /* is_system_server */ false,
                            action,
                            GetInstructionSetString(kRuntimeISA));
  }

Let's look at the case where the process is not the zygote, i.e. the lower half of the if.

void Runtime::InitNonZygoteOrPostFork(
    JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
 
  if (!is_system_server &&
      !safe_mode_ &&
      (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
      jit_.get() == nullptr) {
    // Note that when running ART standalone (not zygote, nor zygote fork),
    // the jit may have already been created.
    CreateJit();
  }

  StartSignalCatcher();

  Dbg::StartJdwp();
}

CreateJit is called here as well. The point is that a process may not have been created by forking from the zygote, in which case the JIT is created separately here. The JDWP debugging thread is also started here.

At this point the JIT setup is complete. Later, if time and need allow, I will cover how the JIT threads actually work in much greater detail.

1.2.3. InitNativeMethods

// InitNativeMethods needs to be after started_ so that the classes
  // it touches will have methods linked to the oat file if necessary.
  {
    ScopedTrace trace2("InitNativeMethods");
    InitNativeMethods();
  }

The snippet above is in Runtime::Start().

void Runtime::InitNativeMethods() {
  VLOG(startup) << "Runtime::InitNativeMethods entering";
  Thread* self = Thread::Current();
  JNIEnv* env = self->GetJniEnv();

  // Must be in the kNative state for calling native methods (JNI_OnLoad code).
  CHECK_EQ(self->GetState(), kNative);

  // First set up JniConstants, which is used by both the runtime's built-in native
  // methods and libcore.
  JniConstants::init(env);

  // Then set up the native methods provided by the runtime itself.
  RegisterRuntimeNativeMethods(env);

  // Initialize classes used in JNI. The initialization requires runtime native
  // methods to be loaded first.
  WellKnownClasses::Init(env);

  // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
  // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
  // libcore can't because it's the library that implements System.loadLibrary!
  {
    std::string error_msg;
    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, &error_msg)) {
      LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
    }
  }
  {
    constexpr const char* kOpenJdkLibrary = kIsDebugBuild
                                                ? "libopenjdkd.so"
                                                : "libopenjdk.so";
    std::string error_msg;
    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, nullptr, &error_msg)) {
      LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
    }
  }

  // Initialize well known classes that may invoke runtime native methods.
  WellKnownClasses::LateInit(env);

  VLOG(startup) << "Runtime::InitNativeMethods exiting";
}

This code has the JavaVM load the following libraries:

  • libjavacore
  • libopenjdk

That is all for now; since I do not yet have an intuitive feel for libcore and the JDK, the understanding here is shallow, and I will come back to it after learning more.
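
LoadNativeLibrary dlopens each library and then invokes its JNI_OnLoad hook. For context, a typical JNI_OnLoad in such a library looks roughly like the sketch below (generic JNI boilerplate; com/example/MyClass and nativeHello are made-up names, not anything from libjavacore or libopenjdk):

#include <jni.h>

static jstring NativeHello(JNIEnv* env, jclass /*clazz*/) {
  return env->NewStringUTF("hello from native");
}

extern "C" jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) {
  JNIEnv* env = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
    return JNI_ERR;
  }
  static const JNINativeMethod kMethods[] = {
      {"nativeHello", "()Ljava/lang/String;", reinterpret_cast<void*>(NativeHello)},
  };
  jclass clazz = env->FindClass("com/example/MyClass");  // hypothetical class
  if (clazz == nullptr || env->RegisterNatives(clazz, kMethods, 1) != JNI_OK) {
    return JNI_ERR;
  }
  return JNI_VERSION_1_6;  // the JNI version LoadNativeLibrary expects back
}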

1.2.4. CreateSystemClassLoader

static jobject CreateSystemClassLoader(Runtime* runtime) {
  if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
    return nullptr;
  }

  ScopedObjectAccess soa(Thread::Current());
  ClassLinker* cl = Runtime::Current()->GetClassLinker();
  auto pointer_size = cl->GetImagePointerSize();

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> class_loader_class(
      hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));

  ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod(
      "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
  CHECK(getSystemClassLoader != nullptr);

  JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
  JNIEnv* env = soa.Self()->GetJniEnv();
  ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
  CHECK(system_class_loader.get() != nullptr);

  soa.Self()->SetClassLoaderOverride(system_class_loader.get());

  Handle<mirror::Class> thread_class(
      hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));

  ArtField* contextClassLoader =
      thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
  CHECK(contextClassLoader != nullptr);

  // We can't run in a transaction yet.
  contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
                                       soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));

  return env->NewGlobalRef(system_class_loader.get());
}

Clearly this is implemented by calling into Java code.
For the Java-side ClassLoader details, see the related "ClassLoader overview" articles.
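
Expressed in plain JNI terms, the two steps CreateSystemClassLoader performs are roughly the following (a sketch only; the real code above goes through ArtMethod/ArtField directly instead of the JNI layer):

#include <jni.h>

jobject InstallSystemClassLoaderSketch(JNIEnv* env) {
  // Step 1: ClassLoader.getSystemClassLoader()
  jclass cl_class = env->FindClass("java/lang/ClassLoader");
  jmethodID get_scl = env->GetStaticMethodID(
      cl_class, "getSystemClassLoader", "()Ljava/lang/ClassLoader;");
  jobject system_cl = env->CallStaticObjectMethod(cl_class, get_scl);

  // Step 2: install it as the current thread's contextClassLoader
  // (the real code writes the field directly instead of calling the setter).
  jclass thread_class = env->FindClass("java/lang/Thread");
  jmethodID current = env->GetStaticMethodID(
      thread_class, "currentThread", "()Ljava/lang/Thread;");
  jobject thread = env->CallStaticObjectMethod(thread_class, current);
  jmethodID set_ccl = env->GetMethodID(
      thread_class, "setContextClassLoader", "(Ljava/lang/ClassLoader;)V");
  env->CallVoidMethod(thread, set_ccl, system_cl);

  return env->NewGlobalRef(system_cl);
}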

1.2.5. Creating the daemon threads

StartDaemonThreads();
void Runtime::StartDaemonThreads() {


  Thread* self = Thread::Current();


  JNIEnv* env = self->GetJniEnv();
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_start);
  if (env->ExceptionCheck()) {
    env->ExceptionDescribe();
    LOG(FATAL) << "Error starting java.lang.Daemons";
  }

  VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}

This, too, is implemented by calling Java code:

public final class Daemons {
    private static final int NANOS_PER_MILLI = 1000 * 1000;
    private static final int NANOS_PER_SECOND = NANOS_PER_MILLI * 1000;
    private static final long MAX_FINALIZE_NANOS = 40L * NANOS_PER_SECOND;

    public static void start() {
        ReferenceQueueDaemon.INSTANCE.start();
        FinalizerDaemon.INSTANCE.start();
        FinalizerWatchdogDaemon.INSTANCE.start();
        HeapTaskDaemon.INSTANCE.start();
    }

This starts four daemon threads.

Two things are worth noting:

1. What these threads actually do still needs some time to study.
2. The process shown here is the zygote process.

root 9745 265 1173156 125508 futex_wait 7d08fefcb0 S ReferenceQueueD
root 9746 265 1173156 125508 futex_wait 7d08fefcb0 S FinalizerDaemon
root 9747 265 1173156 125508 futex_wait 7d08fefcb0 S FinalizerWatchd
root 9748 265 1173156 125508 futex_wait 7d08fefcb0 S HeapTaskDaemon

Application processes have them too.

u0_a61 8920 8912 1043288 67760 futex_wait 7d08fefcb0 S ReferenceQueueD
u0_a61 8921 8912 1043288 67760 futex_wait 7d08fefcb0 S FinalizerDaemon
u0_a61 8922 8912 1043288 67760 futex_wait 7d08fefcb0 S FinalizerWatchd
u0_a61 8923 8912 1043288 67760 futex_wait 7d08fefcb0 S HeapTaskDaemon

1.3. Summary of the runtime preparation

With the startup preparation and the runtime preparation described above, everything is now in place to load and run Java code at any time. So the first thing Android uses it for is to load the class below: ZygoteInit.

runtime.start("com.android.internal.os.ZygoteInit", args, zygote);    
   
   char* slashClassName = toSlashClassName(className);//com/android/internal/os/ZygoteInit
    jclass startClass = env->FindClass(slashClassName);
    if (startClass == NULL) {
        ALOGE("JavaVM unable to locate class '%s'\n", slashClassName);
    } else {
        jmethodID startMeth = env->GetStaticMethodID(startClass, "main", "([Ljava/lang/String;)V");
        if (startMeth == NULL) {
            ALOGE("JavaVM unable to find main() in '%s'\n", className);
        } else {
            env->CallStaticVoidMethod(startClass, startMeth, strArray);
        }
    }
    free(slashClassName);

In the previous sections we walked through the VM's startup preparation, which includes two important stages:

  • Initialization stage: mainly loading the oat files and initializing the Java runtime environment.
  • Start stage: mainly getting the various threads ready, such as the JIT threads and the daemon threads.

With that, the VM environment as a whole is ready and Java classes can be run.
Next we look at how a class method is executed: how the class is found, how the method is found on the class, and finally how the method is run.

  • FindClass
  • GetStaticMethodID
  • CallStaticVoidMethod

1.4. Running Java code

1.4.1. FindClass code analysis

char* slashClassName = toSlashClassName(className);//com/android/internal/os/ZygoteInit
    jclass startClass = env->FindClass(slashClassName);

Next, let's look at how a class is found.

From the earlier analysis, env->FindClass eventually ends up in

JNI::FindClass
/art/runtime/jni_internal.cc
const JNINativeInterface gJniNativeInterface = {
...
  JNI::FindClass,
...
}
static jclass FindClass(JNIEnv* env, const char* name) {
    Runtime* runtime = Runtime::Current();
    ClassLinker* class_linker = runtime->GetClassLinker();
    std::string descriptor(NormalizeJniClassDescriptor(name));
    ScopedObjectAccess soa(env);
    mirror::Class* c = nullptr;

    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetClassLoader(soa)));
    c = class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader);

The main work here is obtaining the ClassLoader (there is a bit of logic to it), after which everything is handed to the class_linker.
The loader returned by GetClassLoader is the system_class_loader_ initialized earlier.

class_linker->FindClass
mirror::Class* ClassLinker::FindClass(Thread* self,
                                      const char* descriptor,
                                      Handle<mirror::ClassLoader> class_loader) {

  const size_t hash = ComputeModifiedUtf8Hash(descriptor);
  mirror::Class* klass = LookupClass(self, descriptor, hash, class_loader.Get());
  if (klass != nullptr) {
    return EnsureResolved(self, descriptor, klass);
  }
  if (descriptor[0] == '[') {
    return CreateArrayClass(self, descriptor, hash, class_loader);
  } else if (class_loader.Get() == nullptr) {
    ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
    if (pair.second != nullptr) {
      return DefineClass(self,
                         descriptor,
                         hash,
                         ScopedNullHandle<mirror::ClassLoader>(),
                         *pair.first,
                         *pair.second);
    } else {
      mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
      self->SetException(pre_allocated);
      return nullptr;
    }
  } else {
    ScopedObjectAccessUnchecked soa(self);
    mirror::Class* cp_klass;
    if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
      if (cp_klass != nullptr) {
        return cp_klass;
      }
    }
    ...
}

There is quite a bit of logic here, but it is fairly clear:

  • 1. Hash the incoming descriptor string.
  • 2. LookupClass searches the previously cached class table; if the class is found it is returned, with EnsureResolved making sure it is fully resolved.
  • 3. If the descriptor starts with '[', it is an array, so an array class is created and returned.
  • 4. If class_loader is null, the class is searched for in boot_class_path_.
  • 5. Otherwise, it is searched for through the PathClassLoader.

So there are three lookups:

  • LookupClass
  • FindInClassPath (boot_class_path_)
  • FindClassInPathClassLoader

Let's go through them one by one.

LookupClass
mirror::Class* ClassLinker::LookupClass(Thread* self,
                                        const char* descriptor,
                                        size_t hash,
                                        mirror::ClassLoader* class_loader) {
  {
    ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
    ClassTable* const class_table = ClassTableForClassLoader(class_loader);
    if (class_table != nullptr) {
      mirror::Class* result = class_table->Lookup(descriptor, hash);
      if (result != nullptr) {
        return result;
      }
    }
  }
  if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
    return nullptr;
  }
  // Lookup failed but need to search dex_caches_.
  mirror::Class* result = LookupClassFromBootImage(descriptor);
  if (result != nullptr) {
    result = InsertClass(descriptor, result, hash);
  } else {
    constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
    if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
      AddBootImageClassesToClassTable();
    }
  }
  return result;
}

Inside this function:
1. Look in the ClassTable; if found, return it.
2. If not found, look in the boot image.
3. If found there, insert it into the ClassTable.
Very straightforward logic, not much to add.

It is still worth looking at how this class_table gets built: as I understand it, the class_table is populated with some classes during initialization, when the oat files are parsed.
The second thing I want to discuss is the middle step, which is related to the DexCache.

mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
  ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
  std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
      GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
  for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      // Try binary searching the type index by descriptor.
      const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
      if (type_id != nullptr) {
        uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
        mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
        if (klass != nullptr) {
          return klass;
        }
      }
    }
  }
  return nullptr;
}

The class is fetched from the dex file. This touches on some initialization details not covered earlier, and on a deeper understanding of the oat file, or rather of the dex files inside the oat file.
We cover that in a separate section.

FindInClassPath --boot_class_path_
ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
  return DefineClass(self,
                     descriptor,
                     hash,
                     ScopedNullHandle<mirror::ClassLoader>(),
                     *pair.first,
                     *pair.second);
}
  • FindInClassPath returns a ClassPathEntry pair for the class.
  • The class then still has to be defined via DefineClass.
ClassPathEntry FindInClassPath(const char* descriptor,
                               size_t hash, const std::vector<const DexFile*>& class_path) {
  for (const DexFile* dex_file : class_path) {
    const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor, hash);
    if (dex_class_def != nullptr) {
      return ClassPathEntry(dex_file, dex_class_def);
    }
  }
  return ClassPathEntry(nullptr, nullptr);
}

The class is found inside a DexFile.

const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t hash) const {
  if (LIKELY(lookup_table_ != nullptr)) {
    const uint32_t class_def_idx = lookup_table_->Lookup(descriptor, hash);
    return (class_def_idx != DexFile::kDexNoIndex) ? &GetClassDef(class_def_idx) : nullptr;
  }

  const TypeId* type_id = FindTypeId(descriptor);
  if (type_id != nullptr) {
    uint16_t type_idx = GetIndexForTypeId(*type_id);
    for (size_t i = 0; i < num_class_defs; ++i) {
      const ClassDef& class_def = GetClassDef(i);
      if (class_def.class_idx_ == type_idx) {
        return &class_def;
      }
    }
  }
  return nullptr;
}
  • Here there is another lookup_table_, again a cached table matched by hash.
  • Of course the cache is not there at the very beginning, so the code below obtains the type_id and type_idx,
  • and from those obtains the class_def. The ClassDef entries were generated when the class files were compiled into the dex file.
const ClassDef& GetClassDef(uint16_t idx) const {
    return class_defs_[idx];
  }

This is just an array, parsed out when the dex file is opened at startup:

DexFile::DexFile(const uint8_t* base, size_t size,
                 const std::string& location,
                 uint32_t location_checksum,
                 MemMap* mem_map,
                 const OatDexFile* oat_dex_file)
    : begin_(base),
...
      class_defs_(reinterpret_cast<const ClassDef*>(base + header_->class_defs_off_)),

Its location is determined from offsets in the dex header.
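
As a reminder of how that offset arithmetic works, here is a trimmed-down sketch. The struct layouts are simplified for illustration and are not the real art::DexFile types; only the class_defs_size_/class_defs_off_ pattern is the point:

#include <cstdint>

struct DexHeaderSketch {
  uint8_t  magic_[8];
  // ... the other header fields are elided here ...
  uint32_t class_defs_size_;  // number of ClassDef entries in the dex file
  uint32_t class_defs_off_;   // byte offset of the ClassDef table from the file start
};

struct ClassDefSketch {
  uint16_t class_idx_;        // index into type_ids identifying this class
  // ... access flags, superclass index, class_data_off_, ... elided ...
};

const ClassDefSketch* GetClassDefs(const uint8_t* base, const DexHeaderSketch* header) {
  // The same pattern as the DexFile constructor quoted above: table = base + offset.
  return reinterpret_cast<const ClassDefSketch*>(base + header->class_defs_off_);
}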
OK, back at the level above,

return ClassPathEntry(dex_file, dex_class_def);

what is returned is a structure holding the dex_file and the dex_class_def.

return DefineClass(self,
                         descriptor,
                         hash,
                         ScopedNullHandle<mirror::ClassLoader>(),
                         *pair.first,
                         *pair.second);

Then the class gets defined. pair.first is the dex_file and pair.second is the dex_class_def.

mirror::Class* ClassLinker::DefineClass(Thread* self,
                                        const char* descriptor,
                                        size_t hash,
                                        Handle<mirror::ClassLoader> class_loader,
                                        const DexFile& dex_file,
                                        const DexFile::ClassDef& dex_class_def) {
  StackHandleScope<3> hs(self);
  auto klass = hs.NewHandle<mirror::Class>(nullptr);

  if (klass.Get() == nullptr) {
    klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
  }
  if (UNLIKELY(klass.Get() == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  mirror::DexCache* dex_cache = RegisterDexFile(dex_file, class_loader.Get());

  klass->SetDexCache(dex_cache);
  SetupClass(dex_file, dex_class_def, klass, class_loader.Get());

  // Add the newly loaded class to the loaded classes table.
  mirror::Class* existing = InsertClass(descriptor, klass.Get(), hash);

  LoadClass(self, dex_file, dex_class_def, klass);
 
  jit::Jit::NewTypeLoadedIfUsingJit(h_new_class.Get());

  return h_new_class.Get();
}

Defining the class:

  • AllocClass allocates the class object; there is a whole pile of knowledge behind this, about spaces, allocating memory for a class, and how that space is organized in preparation for future GC.
  • Then RegisterDexFile registers the dex file.
  • Then SetupClass performs some basic setup on the Class.
  • Then the class is inserted into the class_table, so the next lookup is fast.
  • Then LoadClass loads the class ("populates" might be the better word here).
  • Finally, if necessary, the JIT is notified.

AllocClass deserves a small analysis of its own in a separate section.
Here let's focus on LoadClass.

void ClassLinker::LoadClass(Thread* self,
                            const DexFile& dex_file,
                            const DexFile::ClassDef& dex_class_def,
                            Handle<mirror::Class> klass) {
  const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
  bool has_oat_class = false;
  if (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) {
    OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
                                               &has_oat_class);
    if (has_oat_class) {
      LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
    }
  }
  if (!has_oat_class) {
    LoadClassMembers(self, dex_file, class_data, klass, nullptr);
  }

Here the OatClass is looked up; if the class has one, it is passed into LoadClassMembers.

void ClassLinker::LoadClassMembers(Thread* self,
                                   const DexFile& dex_file,
                                   const uint8_t* class_data,
                                   Handle<mirror::Class> klass,
                                   const OatFile::OatClass* oat_class) {
  {
    LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
    ClassDataItemIterator it(dex_file, class_data);
    LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self,
                                                                allocator,
                                                                it.NumStaticFields());
    size_t num_sfields = 0;
    uint32_t last_field_idx = 0u;
    for (; it.HasNextStaticField(); it.Next()) {
      uint32_t field_idx = it.GetMemberIndex();
      if (num_sfields == 0 || LIKELY(field_idx > last_field_idx)) {
        LoadField(it, klass, &sfields->At(num_sfields));
        ++num_sfields;
        last_field_idx = field_idx;
      }
    }
    LengthPrefixedArray<ArtField>* ifields = AllocArtFieldArray(self,
                                                                allocator,
                                                                it.NumInstanceFields());
    size_t num_ifields = 0u;
    last_field_idx = 0u;
    for (; it.HasNextInstanceField(); it.Next()) {
      uint32_t field_idx = it.GetMemberIndex();
      if (num_ifields == 0 || LIKELY(field_idx > last_field_idx)) {
        LoadField(it, klass, &ifields->At(num_ifields));
        ++num_ifields;
        last_field_idx = field_idx;
      }
    }

    klass->SetSFieldsPtr(sfields);
    klass->SetIFieldsPtr(ifields);
   
    klass->SetMethodsPtr(
        AllocArtMethodArray(self, allocator, it.NumDirectMethods() + it.NumVirtualMethods()),
        it.NumDirectMethods(),
        it.NumVirtualMethods());
    size_t class_def_method_index = 0;
    uint32_t last_dex_method_index = DexFile::kDexNoIndex;
    size_t last_class_def_method_index = 0;
    for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
      ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_);
      LoadMethod(self, dex_file, it, klass, method);
      LinkCode(method, oat_class, class_def_method_index);
      uint32_t it_method_index = it.GetMemberIndex();
      method->SetMethodIndex(last_class_def_method_index);
      class_def_method_index++;
    }
    for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
      ArtMethod* method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
      LoadMethod(self, dex_file, it, klass, method);
      LinkCode(method, oat_class, class_def_method_index);
      class_def_method_index++;
    }
  }
}

This function does quite a lot:

  • 1. Processes static fields (sfields), using LoadField
  • 2. Processes instance fields (ifields), using LoadField
  • 3. Processes direct methods, using LoadMethod and LinkCode
  • 4. Processes virtual methods, using LoadMethod and LinkCode
void ClassLinker::LoadField(const ClassDataItemIterator& it,
                            Handle<mirror::Class> klass,
                            ArtField* dst) {
  const uint32_t field_idx = it.GetMemberIndex();
  dst->SetDexFieldIndex(field_idx);
  dst->SetDeclaringClass(klass.Get());
  dst->SetAccessFlags(it.GetFieldAccessFlags());
}

LoadField is simple: it just sets what needs to be set.

void ClassLinker::LoadMethod(Thread* self,
                             const DexFile& dex_file,
                             const ClassDataItemIterator& it,
                             Handle<mirror::Class> klass,
                             ArtMethod* dst) {
  uint32_t dex_method_idx = it.GetMemberIndex();
  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
  const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);

  ScopedAssertNoThreadSuspension ants(self, "LoadMethod");
  dst->SetDexMethodIndex(dex_method_idx);
  dst->SetDeclaringClass(klass.Get());
  dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());

  dst->SetDexCacheResolvedMethods(klass->GetDexCache()->GetResolvedMethods(), image_pointer_size_);
  dst->SetDexCacheResolvedTypes(klass->GetDexCache()->GetResolvedTypes(), image_pointer_size_);
...
                             
}

LoadMethod is much the same.

void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
                           uint32_t class_def_method_index) {
  Runtime* const runtime = Runtime::Current();
  if (oat_class != nullptr) {
    const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
    oat_method.LinkMethod(method);
  }

  const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
  bool enter_interpreter = ShouldUseInterpreterEntrypoint(method, quick_code);

  if (method->IsStatic() && !method->IsConstructor()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
  } else if (quick_code == nullptr && method->IsNative()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
  } else if (enter_interpreter) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
  }

  if (method->IsNative()) {
    method->UnregisterNative();

    if (enter_interpreter || quick_code == nullptr) {
      const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
    }
  }
}

Judging from the code, LinkCode is also quite concise. But the method entry points it sets become a bit convoluted once we actually call the method, and they are quite important.
We will leave this here for now and sort it out together when we discuss method invocation.

So through the boot class path branch of FindClass we have already seen a lot about class definition, class loading, and class linking.
That makes the next function, FindClassInPathClassLoader, easy.

FindClassInPathClassLoader
bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                             Thread* self,
                                             const char* descriptor,
                                             size_t hash,
                                             Handle<mirror::ClassLoader> class_loader,
                                             mirror::Class** result) {
  if (IsBootClassLoader(soa, class_loader.Get())) {
    ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
    if (pair.second != nullptr) {
      mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
      if (klass != nullptr) {
        *result = EnsureResolved(self, descriptor, klass);
      } else {
        *result = DefineClass(self,
                              descriptor,
                              hash,
                              ScopedNullHandle<mirror::ClassLoader>(),
                              *pair.first,
                              *pair.second);
      }
      if (*result == nullptr) {
        CHECK(self->IsExceptionPending()) << descriptor;
        self->ClearException();
      }
    } else {
      *result = nullptr;
    }
    return true;
  }

  Handle<mirror::ClassLoader> h_parent(hs.NewHandle(class_loader->GetParent()));
  bool recursive_result = FindClassInPathClassLoader(soa, self, descriptor, hash, h_parent, result);
  if (!recursive_result) {
    // Something wrong up the chain.
    return false;
  }

  if (*result != nullptr) {
    // Found the class up the chain.
    return true;
  }
  
  ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
  ArtField* const dex_file_field =
      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
  mirror::Object* dex_path_list =
      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
      GetObject(class_loader.Get());
  if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
    mirror::Object* dex_elements_obj =
        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
        GetObject(dex_path_list);
    if (dex_elements_obj != nullptr) {
      Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
          hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
      for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
        mirror::Object* element = dex_elements->GetWithoutChecks(i);
        mirror::Object* dex_file = dex_file_field->GetObject(element);
        if (dex_file != nullptr) {
          mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
          if (long_array == nullptr) {
            break;
          }
          int32_t long_array_size = long_array->GetLength();
          for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
            const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
                long_array->GetWithoutChecks(j)));
            const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
            if (dex_class_def != nullptr) {
              mirror::Class* klass = DefineClass(self,
                                                 descriptor,
                                                 hash,
                                                 class_loader,
                                                 *cp_dex_file,
                                                 *dex_class_def);
              if (klass == nullptr) {
                self->ClearException();
                return true;
              }
              *result = klass;
              return true;
            }
          }
        }
      }
    }
    self->AssertNoPendingException();
  }
  return true;
}

Let's walk through the logic of this function, assuming our ClassLoader is an ordinary application ClassLoader:

  • 1. On entry, it is not the BootClassLoader.
  • 2. Get the parent ClassLoader and recursively call FindClassInPathClassLoader on it.
  • 3. If the class is found (the two checks after the recursion), return; otherwise keep going.
  • 4. What follows is a chain of lookups, roughly:
  • 1. DexPathList (the pathList field of the class loader)
  • 2. dexElements
  • 3. DexFile (via its native cookie)

Working down layer by layer, the class is ultimately still found in a DexFile through its dex_class_def, much like what we saw above. And whether or not it is the BootClassLoader, the class_def that is found still has to go through DefineClass.

That concludes the FindClass process.

To summarize:

1. The class is searched for in three places:
  • the cached class_table
  • the boot image file
  • your own class path
2. The class is then loaded:
  • loading of its fields
  • loading of its methods
  • linking of the method code via LinkCode

1.4.2. FindClass: LookupClassFromBootImage

LookupClassFromBootImage
mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
  ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
  std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
      GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
  for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      // Try binary searching the type index by descriptor.
      const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
      if (type_id != nullptr) {
        uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
        mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
        if (klass != nullptr) {
          return klass;
        }
      }
    }
  }
  return nullptr;
}
std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
      GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());

First look at Runtime::Current()->GetHeap()->GetBootImageSpaces():

const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

This simply returns the boot_image_spaces_ we built earlier.

GetImageDexCaches: next let's see how GetImageDexCaches pulls the DexCaches out of the image space,
and what these DexCaches actually are.

static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
    std::vector<gc::space::ImageSpace*> image_spaces) SHARED_REQUIRES(Locks::mutator_lock_) {

  std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
  for (gc::space::ImageSpace* image_space : image_spaces) {
    mirror::Object* root =image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
    dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());
  }
  return dex_caches_vector;
}

For the loop, assume there is only one space, to keep things simple.

mirror::Object* root =image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);

How should we understand this?
We need to know a bit about the format of the image (and dex) files.

template <ReadBarrierOption kReadBarrierOption>
inline mirror::Object* ImageHeader::GetImageRoot(ImageRoot image_root) const {
  mirror::ObjectArray<mirror::Object>* image_roots = GetImageRoots<kReadBarrierOption>();
  return image_roots->Get<kVerifyNone, kReadBarrierOption>(static_cast<int32_t>(image_root));
}

template <ReadBarrierOption kReadBarrierOption>
inline mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
....
  return image_roots;
}
mirror::ObjectArray<mirror::Object>* image_roots
enum ImageRoot {
    kDexCaches,
    kClassRoots,
    kImageRootsMax,
  };

It should be understood like this: when we parsed the ImageHeader at the very beginning, the header contained an image_roots address, which points to an array of three ImageRoot entries.
For example, if we take kDexCaches, the ImageRoot pointer we get points to an array of DexCache objects. DexCache is a core class defined in libcore/libart/src/main/java/java/lang/DexCache.java.

It is then pushed into the vector:

dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());

Then the vector is iterated:

for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
      if (type_id != nullptr) {
        uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
        mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
        if (klass != nullptr) {
          return klass;
        }
      }
    }
  }
  return nullptr;
}

Inside the loop (see the simplified sketch after this list):

  • Get the i-th DexCache.
  • From that DexCache, get its DexFile.
  • From the DexFile, look up the type_id of the class we need.
  • From the DexFile, convert that type_id into a type_idx.
  • Finally, use the type_idx to fetch the resolved class from the DexCache.
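
As a mental model of those steps, the sketch below uses deliberately simplified stand-in types (not the real ART classes): the dex type table is sorted by descriptor, so the type index can be found by binary search, and the DexCache keeps one resolved-class slot per type index:

#include <algorithm>
#include <string>
#include <vector>

struct FakeClass { std::string descriptor; };

struct FakeDexCache {
  std::vector<std::string> sorted_type_descriptors;  // models the dex type_ids table
  std::vector<FakeClass*>  resolved_types;           // models DexCache::GetResolvedType

  FakeClass* Lookup(const std::string& descriptor) const {
    auto it = std::lower_bound(sorted_type_descriptors.begin(),
                               sorted_type_descriptors.end(), descriptor);
    if (it == sorted_type_descriptors.end() || *it != descriptor) {
      return nullptr;  // this dex file does not know the type at all
    }
    size_t type_idx = static_cast<size_t>(it - sorted_type_descriptors.begin());
    return resolved_types[type_idx];  // may still be null if never resolved
  }
};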

An open question:

Getting this address from the ImageHeader seems to mean that the image, i.e. the boot.art file, already caches many of these classes.
In other words, the image already contains lots of classes prepared ahead of time, so they do not have to be re-parsed from disk. Is that the right way to understand it?

1.4.3. FindClass: AllocClass

mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
  return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}
mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Class::InitializeClassVisitor visitor(class_size);
  mirror::Object* k = kMovingClasses ?
      heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
      heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
  return k->AsClass();
}
template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              mirror::Class* klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(
        self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
  }

We are not too concerned with the details here; we just want to see the flow.

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      mirror::Class* klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
...
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
   ...
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
   ...
  } else {
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated, &klass);
      if (obj == nullptr) {
        if (!self->IsExceptionPending()) {
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
  ...
  }
}

There are several allocation paths here:

  • 1. For very large objects, AllocLargeObject is used (what counts as a large object is discussed in a later article).
  • 2. For TLAB-type allocators, AllocTlab is used.
  • 3. The most common path is a three-step flow: TryToAllocate first; if that is not enough, AllocateInternalWithGc runs a GC and then tries to allocate again. This resembles page allocation in the Linux kernel and is a common pattern in general (see the sketch after this list).
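
Step 3 is worth spelling out as a pattern. The sketch below is hedged and self-invented (TryFastAlloc, RunYoungGenerationGc, RunFullGcAndGrowHeap are made-up placeholder names, not ART's API); it only shows the escalation idea behind TryToAllocate followed by AllocateInternalWithGc: retry the fast path after progressively more expensive attempts to free space.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Placeholder fast path: a tiny bump-pointer buffer standing in for TLAB/RosAlloc.
static uint8_t g_buffer[1024];
static size_t g_pos = 0;

static void* TryFastAlloc(size_t bytes) {
  if (g_pos + bytes > sizeof(g_buffer)) return nullptr;  // space exhausted
  void* obj = g_buffer + g_pos;
  g_pos += bytes;
  return obj;
}

// Placeholder "collections": in ART these would be real GC cycles that free memory.
static void RunYoungGenerationGc() { std::puts("cheap GC"); }
static void RunFullGcAndGrowHeap() { std::puts("full GC + grow heap"); }

// The escalation idea: fast path first, then cheap GC + retry, then full GC + retry.
static void* AllocWithGcFallback(size_t bytes) {
  if (void* obj = TryFastAlloc(bytes)) return obj;   // 1. fast path
  RunYoungGenerationGc();                            // 2. cheap GC, then retry
  if (void* obj = TryFastAlloc(bytes)) return obj;
  RunFullGcAndGrowHeap();                            // 3. full GC + grow, last retry
  return TryFastAlloc(bytes);                        // nullptr here means OOM
}

int main() {
  for (int i = 0; i < 40; ++i) {
    if (AllocWithGcFallback(64) == nullptr) { std::puts("OOM"); break; }
  }
}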

Let's walk through one TryToAllocate path.

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
...
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
...
      break;
    }
    case kAllocatorTypeRosAlloc: {
...
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
...
    }
    case kAllocatorTypeTLAB: {
  ...
    }
    case kAllocatorTypeRegion: {
...
    }
    case kAllocatorTypeRegionTLAB: {
 ...
        } else {
    ...
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

This again splits into several cases: large objects, the non-moving space, and so on. Let's take the simplest one, allocation from the dlmalloc space, and analyze that.
/art/runtime/gc/space/dlmalloc_space.h

virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_) {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                           bytes_tl_bulk_allocated);
  }
inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  mirror::Object* obj;
  {
    MutexLock mu(self, lock_);
    obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
  }
  if (LIKELY(obj != nullptr)) {
    memset(obj, 0, num_bytes);
  }
  return obj;
}

Alloc here is really a virtual interface; each space implementation provides its own version of the function.
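
To make the virtual-interface point concrete, here is a hedged miniature (FakeSpace, FakeDlMallocSpace, FakeBumpPointerSpace are invented names and are nowhere near the real gc::space hierarchy): one Alloc entry point, dispatched virtually to whichever backing strategy the heap was configured with.

#include <cstddef>
#include <cstdlib>
#include <memory>

// Invented miniature of the space idea: a common virtual Alloc interface.
class FakeSpace {
 public:
  virtual ~FakeSpace() = default;
  virtual void* Alloc(size_t num_bytes) = 0;
};

// One strategy: delegate to a malloc-style allocator (stands in for mspace_malloc).
class FakeDlMallocSpace : public FakeSpace {
 public:
  void* Alloc(size_t num_bytes) override { return std::malloc(num_bytes); }
};

// Another strategy: bump a cursor inside a pre-reserved buffer.
class FakeBumpPointerSpace : public FakeSpace {
 public:
  explicit FakeBumpPointerSpace(size_t capacity)
      : buffer_(new char[capacity]), capacity_(capacity) {}
  void* Alloc(size_t num_bytes) override {
    if (pos_ + num_bytes > capacity_) return nullptr;  // space full
    void* obj = buffer_.get() + pos_;
    pos_ += num_bytes;
    return obj;
  }
 private:
  std::unique_ptr<char[]> buffer_;
  size_t capacity_;
  size_t pos_ = 0;
};

// The heap only sees FakeSpace*, so the space chosen at init time decides which
// Alloc body runs -- the same idea as the allocator switch in TryToAllocate.
void* HeapAlloc(FakeSpace* space, size_t num_bytes) { return space->Alloc(num_bytes); }

Back to the real DlMallocSpace implementation: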

inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
    Thread* /*self*/, size_t num_bytes,
    size_t* bytes_allocated,
    size_t* usable_size,
    size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
  if (LIKELY(result != nullptr)) {
    size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
    *bytes_allocated = allocation_size;
    *bytes_tl_bulk_allocated = allocation_size;
  }
  return result;
}

Which space is actually used also depends on the type of object and on how the heap was initialized.

void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
 ....

    mem = sys_alloc(ms, nb);

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}

Many of the intermediate steps are omitted here.

static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
...
    void* mem = mmap_alloc(m, nb);
    if (mem != 0)
      return mem;
  }
...
}
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }
  if (mmsize > nb) {     /* Check for wrap around 0 */
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
   ..
      return chunk2mem(p);
    }
  }
  return 0;
}

It computes the alignment offsets and so on.

So, to sum up: ART works with the concept of a space. It reserves a space of its own and implements a malloc-style mechanism on top of it to hand out memory. The point of doing it this way is that ART can record where each object lives inside the space, track that with a bitmap, and thereby implement freeing and GC.
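
A minimal sketch of that idea, assuming a bump-pointer buffer plus a bitmap with one bit per 8-byte slot marking object starts (ToySpace and everything in it is invented; ART's SpaceBitmap and spaces are far more elaborate):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy space: one contiguous buffer, allocation by bumping a cursor,
// and a bitmap recording where every handed-out object begins.
class ToySpace {
 public:
  static constexpr size_t kSlot = 8;  // bitmap granularity in bytes

  explicit ToySpace(size_t capacity)
      : heap_(capacity), live_bitmap_((capacity / kSlot + 7) / 8, 0) {}

  void* Alloc(size_t bytes) {
    bytes = (bytes + kSlot - 1) & ~(kSlot - 1);  // round up to the slot size
    if (pos_ + bytes > heap_.size()) return nullptr;
    void* obj = heap_.data() + pos_;
    SetBit(pos_ / kSlot);                        // remember where this object starts
    pos_ += bytes;
    return obj;
  }

  // GC-style query: was this address handed out as an object start?
  bool IsObjectStart(const void* addr) const {
    size_t offset = static_cast<const uint8_t*>(addr) - heap_.data();
    return (offset % kSlot == 0) && TestBit(offset / kSlot);
  }

 private:
  void SetBit(size_t i)        { live_bitmap_[i / 8] |= uint8_t(1u << (i % 8)); }
  bool TestBit(size_t i) const { return (live_bitmap_[i / 8] & (1u << (i % 8))) != 0; }

  std::vector<uint8_t> heap_;
  std::vector<uint8_t> live_bitmap_;
  size_t pos_ = 0;
};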

1.4.4.GetStaticMethodID

jmethodID startMeth = env->GetStaticMethodID(startClass, "main", "([Ljava/lang/String;)V");

Having found the class, the next step is to find the method on it.

art/runtime/jni_internal.cc
static jmethodID GetStaticMethodID(JNIEnv* env, jclass java_class, const char* name,
                                     const char* sig) {
    return FindMethodID(soa, java_class, name, sig, true);
  }
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
                              const char* name, const char* sig, bool is_static)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class));

  ArtMethod* method = nullptr;
  auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
  if (is_static) {
    method = c->FindDirectMethod(name, sig, pointer_size);
  } else if (c->IsInterface()) {
    method = c->FindInterfaceMethod(name, sig, pointer_size);
  } else {
    method = c->FindVirtualMethod(name, sig, pointer_size);
  }
  if (method == nullptr || method->IsStatic() != is_static) {
    ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
    return nullptr;
  }
  return soa.EncodeMethod(method);
}
  • 1. Make sure the class has been initialized: EnsureInitialized.
  • 2. Find the ArtMethod through the class.
    Since our main method is static, FindDirectMethod is used.
ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature,
                                   size_t pointer_size) {
  for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
    ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
    if (method != nullptr) {
      return method;
    }
  }
  return nullptr;
}

It walks this class and each of its super classes in turn.

ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
                                           size_t pointer_size) {
  for (auto& method : GetDirectMethods(pointer_size)) {
    if (name == method.GetName() && signature == method.GetSignature()) {
      return &method;
    }
  }
  return nullptr;
}

It iterates over the methods and returns the one whose name and signature both match.
So how does GetDirectMethods obtain the methods?

inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
  CheckPointerSize(pointer_size);
  return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
}
inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(size_t pointer_size) {
  return ArraySlice<ArtMethod>(GetMethodsPtr(),
                               GetDirectMethodsStartOffset(),
                               GetVirtualMethodsStartOffset(),
                               ArtMethod::Size(pointer_size),
                               ArtMethod::Alignment(pointer_size));
}

And that's it; we won't dig any deeper here.
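
To make the slice idea a bit more concrete, here is a hedged sketch with invented types (FakeMethod, FakeClassMethods): all of a class's methods sit in one flat array, and the "direct methods" are just the sub-range before the virtual methods, scanned linearly by name and signature the way FindDeclaredDirectMethod does.

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical flat method record, standing in for ArtMethod.
struct FakeMethod {
  std::string name;
  std::string signature;
};

// One flat array per class: [ direct methods | virtual methods ].
struct FakeClassMethods {
  std::vector<FakeMethod> methods;
  size_t direct_start = 0;   // index where direct methods begin
  size_t virtual_start = 0;  // index where virtual methods begin (end of direct slice)
};

// Same spirit as FindDeclaredDirectMethod: linear scan of the direct slice,
// matching on both name and signature.
const FakeMethod* FindDeclaredDirect(const FakeClassMethods& cls,
                                     const std::string& name,
                                     const std::string& sig) {
  for (size_t i = cls.direct_start; i < cls.virtual_start; ++i) {
    const FakeMethod& m = cls.methods[i];
    if (m.name == name && m.signature == sig) return &m;
  }
  return nullptr;
}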

1.4.5.CallStaticVoidMethod

env->CallStaticVoidMethod(startClass, startMeth, strArray);
art/runtime/jni_internal.cc
static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
    va_list ap;
    va_start(ap, mid);
    ScopedObjectAccess soa(env);
    InvokeWithVarArgs(soa, nullptr, mid, ap);
    va_end(ap);
  }
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
                         va_list args)
    SHARED_REQUIRES(Locks::mutator_lock_) {

  ArtMethod* method = soa.DecodeMethod(mid);
...
  InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
  if (is_string_init) {

    UpdateReference(soa.Self(), obj, result.GetL());
  }
  return result;
}

The jmethodID mid is decoded back into the ArtMethod.

static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
                               ArtMethod* method, ArgArray* arg_array, JValue* result,
                               const char* shorty)
    SHARED_REQUIRES(Locks::mutator_lock_) {
...
  method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}

This ArtMethod's Invoke function is then called.

void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                       const char* shorty) {
 ...

    constexpr bool kLogInvocationStartAndReturn = false;
    bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
    if (LIKELY(have_quick_code)) {
      if (!IsStatic()) {
        (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
      } else {
        (*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
      }
    } else {
      if (result != nullptr) {
        result->SetJ(0);
      }
    }
  }

The have_quick_code check asks whether the method has compiled (quick) code; in our case it does.
If it doesn't, this trimmed excerpt just sets result->SetJ(0) and returns; that path isn't explored further here.


Since we do have native (compiled) code, either art_quick_invoke_stub or art_quick_invoke_static_stub is called.
The only difference between them is whether the method is static.


So is that the end of it?
Actually there is still more of the flow to sort out.

Every method is invoked through this same unified entry stub, so how does it end up calling our ZygoteInit.main? What I can see being passed in here is the ArtMethod pointer (this) of the main method we want to invoke.

For the assembly part below I'll lean on 老罗's code walkthrough, since I'm not very comfortable with assembly and haven't checked it against my own source tree yet; his version is used to sort out the flow.

Assume the call goes through art_quick_invoke_stub:

/*
     * Quick invocation stub.
     * On entry:
     *   r0 = method pointer
     *   r1 = argument array or NULL for no argument methods
     *   r2 = size of argument array in bytes
     *   r3 = (managed) thread pointer
     *   [sp] = JValue* result
     *   [sp + 4] = result type char
     */
ENTRY art_quick_invoke_stub
    push   {r0, r4, r5, r9, r11, lr}       @ spill regs
    .save  {r0, r4, r5, r9, r11, lr}
    .pad #24
    .cfi_adjust_cfa_offset 24
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r4, 4
    .cfi_rel_offset r5, 8
    .cfi_rel_offset r9, 12
    .cfi_rel_offset r11, 16
    .cfi_rel_offset lr, 20
    mov    r11, sp                         @ save the stack pointer
    .cfi_def_cfa_register r11
    mov    r9, r3                          @ move managed thread pointer into r9
    mov    r4, #SUSPEND_CHECK_INTERVAL     @ reset r4 to suspend check interval
    add    r5, r2, #16                     @ create space for method pointer in frame
    and    r5, #0xFFFFFFF0                 @ align frame size to 16 bytes
    sub    sp, r5                          @ reserve stack space for argument array
    add    r0, sp, #4                      @ pass stack pointer + method ptr as dest for memcpy
    bl     memcpy                          @ memcpy (dest, src, bytes)
    ldr    r0, [r11]                       @ restore method*
    ldr    r1, [sp, #4]                    @ copy arg value for r1
    ldr    r2, [sp, #8]                    @ copy arg value for r2
    ldr    r3, [sp, #12]                   @ copy arg value for r3
    mov    ip, #0                          @ set ip to 0
    str    ip, [sp]                        @ store NULL for method* at bottom of frame
    ldr    ip, [r0, #METHOD_CODE_OFFSET]   @ get pointer to the code
    blx    ip                              @ call the method
    mov    sp, r11                         @ restore the stack pointer
    ldr    ip, [sp, #24]                   @ load the result pointer
    strd   r0, [ip]                        @ store r0/r1 into result pointer
    pop    {r0, r4, r5, r9, r11, lr}       @ restore spill regs
    .cfi_adjust_cfa_offset -24
    bx     lr
END art_quick_invoke_stub


The incoming arguments are stored in

r0, r1, r2, r3.

Now let's focus on r0.

ldr    ip, [r0, #METHOD_CODE_OFFSET]   @ get pointer to the code

Here ip is loaded from [r0 + METHOD_CODE_OFFSET], that is, from offset 40 of the ArtMethod that r0 points to.

METHOD_CODE_OFFSET is defined in art/runtime/asm_support.h and its value is 40:
// Offset of field Method::entry_point_from_compiled_code_
#define METHOD_CODE_OFFSET 40
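
In C terms, what the ldr/blx pair above does is roughly the following. This is only a sketch: the struct layout is pretended, kMethodCodeOffset just mirrors the 40 from asm_support.h, and the function pointer type is simplified (the real stubs take the arguments, Thread* and result as well).

#include <cstddef>
#include <cstdint>

// Pretend the entry point pointer lives at byte offset 40 inside ArtMethod,
// matching METHOD_CODE_OFFSET used by the stub above.
constexpr size_t kMethodCodeOffset = 40;

using EntryPointFn = void (*)();  // simplified signature

// Equivalent of "ldr ip, [r0, #METHOD_CODE_OFFSET]; blx ip":
void CallThroughEntryPoint(void* art_method) {
  auto* slot = reinterpret_cast<EntryPointFn*>(
      reinterpret_cast<uint8_t*>(art_method) + kMethodCodeOffset);
  (*slot)();  // jump to whatever SetEntryPointFromQuickCompiledCode stored there
}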

As the comment says, at offset 40 into the method sits entry_point_from_compiled_code_, which is exactly what SetEntryPointFromQuickCompiledCode stored back when FindClass linked the code.
So the call here jumps to whatever entry point was set for this method. Recall the code that sets it:

if (method->IsStatic() && !method->IsConstructor()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
  } else if (quick_code == nullptr && method->IsNative()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
  } else if (enter_interpreter) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
  }

This is again a unified entry point. Assume GetQuickResolutionStub() was the one chosen.

extern "C" void art_quick_resolution_trampoline(ArtMethod*);
static inline const void* GetQuickResolutionStub() {
  return reinterpret_cast<const void*>(art_quick_resolution_trampoline);
}

The art_quick_resolution_trampoline it returns is itself another common trampoline:

.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
    mov     r2, r9                 @ pass Thread::Current
    mov     r3, sp                 @ pass SP
    blx     artQuickResolutionTrampoline  @ (Method* called, receiver, Thread*, SP)
    cbz     r0, 1f                 @ is code pointer null? goto exception
    mov     r12, r0
    ldr  r0, [sp, #0]              @ load resolved method in r0
    ldr  r1, [sp, #8]              @ restore non-callee save r1
    ldrd r2, [sp, #12]             @ restore non-callee saves r2-r3
    ldr  lr, [sp, #44]             @ restore lr
    add  sp, #48                   @ rewind sp
    .cfi_adjust_cfa_offset -48
    bx      r12                    @ tail-call into actual code
1:
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline

This assembly stub calls artQuickResolutionTrampoline to obtain the address of the real native code for main:

extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
...
        code = called->GetEntryPointFromQuickCompiledCode();
      }
 ...

  return code;
}

It finally returns the code address, which is then executed.
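
The pattern here is essentially lazy binding: the entry point initially holds a resolver stub; the first call resolves the real target and jumps into it, and (conceptually; in ART the class linker does the patching once the class is resolved and initialized) later calls can go straight to the code. A hedged, much-simplified sketch with invented names, using a function pointer instead of assembly:

#include <cstdio>

struct FakeMethod;
using Code = void (*)(FakeMethod*);

struct FakeMethod {
  const char* name;
  Code entry_point;  // stands in for entry_point_from_quick_compiled_code_
  Code real_code;    // what "resolution" would discover (e.g. the oat code)
};

// The compiled body we actually want to run.
static void MainCode(FakeMethod* m) { std::printf("running %s\n", m->name); }

// Stands in for art_quick_resolution_trampoline + artQuickResolutionTrampoline:
// find the real code, patch the entry point, then tail-call into it.
static void ResolutionStub(FakeMethod* m) {
  Code code = m->real_code;   // "resolve" the target
  m->entry_point = code;      // patch so the next call skips the stub
  code(m);                    // tail-call into the actual code
}

int main() {
  FakeMethod main_method{"ZygoteInit.main", ResolutionStub, MainCode};
  main_method.entry_point(&main_method);  // first call goes through the stub
  main_method.entry_point(&main_method);  // second call hits MainCode directly
}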

After all this bouncing around we're finally through. There are still plenty of places that haven't been analyzed clearly; let's leave it at that for now.

1.4.6.CallStaticVoidMethod–LinkCode


void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
                           uint32_t class_def_method_index) {
if (oat_class != nullptr) {
    oat_method.LinkMethod(method);
}
if (method->IsStatic() && !method->IsConstructor()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
  } else if (quick_code == nullptr && method->IsNative()) {
    method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
  } else if (enter_interpreter) {
    // Set entry point from compiled code if there's no code or in interpreter only mode.
    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
  }

Here's the point: LinkCode set up these three different entry points earlier, so now let's see what actually happens when the method is called.

static jobject CallStaticObjectMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
    va_list ap;
    va_start(ap, mid);
    CHECK_NON_NULL_ARGUMENT(mid);
    ScopedObjectAccess soa(env);
    JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
    jobject local_result = soa.AddLocalReference<jobject>(result.GetL());
    va_end(ap);
    return local_result;
  }

Skipping the intermediate steps, everything eventually reaches:

method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                       const char* shorty) {
 
      if (!IsStatic()) {
        (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
      } else {
        (*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
      }
}

Now assume this function pointer, art_quick_invoke_stub, is what gets executed.

extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
                                      Thread* self, JValue* result, const char* shorty) {
  quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
}
template <bool kIsStatic>
static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
                                   Thread* self, JValue* result, const char* shorty) {
  art_quick_invoke_stub_internal(method, args, args_size, self, result, result_in_float,
      core_reg_args, fp_reg_args);
}
extern "C" void art_quick_invoke_stub_internal(ArtMethod*, uint32_t*, uint32_t,
                                               Thread* self, JValue* result, uint32_t, uint32_t*,
                                               uint32_t*);
ENTRY art_quick_invoke_stub_internal
    SPILL_ALL_CALLEE_SAVE_GPRS             @ spill regs (9)
    mov    r11, sp                         @ save the stack pointer
    .cfi_def_cfa_register r11

    mov    r9, r3                          @ move managed thread pointer into r9

    add    r4, r2, #4                      @ create space for method pointer in frame
    sub    r4, sp, r4                      @ reserve & align *stack* to 16 bytes: native calling
    and    r4, #0xFFFFFFF0                 @ convention only aligns to 8B, so we have to ensure ART
    mov    sp, r4                          @ 16B alignment ourselves.

    mov    r4, r0                          @ save method*
    add    r0, sp, #4                      @ pass stack pointer + method ptr as dest for memcpy
    bl     memcpy                          @ memcpy (dest, src, bytes)
    mov    ip, #0                          @ set ip to 0
    str    ip, [sp]                        @ store null for method* at bottom of frame

    ldr    ip, [r11, #48]                  @ load fp register argument array pointer
    vldm   ip, {s0-s15}                    @ copy s0 - s15

    ldr    ip, [r11, #44]                  @ load core register argument array pointer
    mov    r0, r4                          @ restore method*
    add    ip, ip, #4                      @ skip r0
    ldm    ip, {r1-r3}                     @ copy r1 - r3

#ifdef ARM_R4_SUSPEND_FLAG
    mov    r4, #SUSPEND_CHECK_INTERVAL     @ reset r4 to suspend check interval
#endif

    ldr    ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]  @ get pointer to the code
    blx    ip                              @ call the method

    mov    sp, r11                         @ restore the stack pointer
    .cfi_def_cfa_register sp

    ldr    r4, [sp, #40]                   @ load result_is_float
    ldr    r9, [sp, #36]                   @ load the result pointer
    cmp    r4, #0
    ite    eq
    strdeq r0, [r9]                        @ store r0/r1 into result pointer
    vstrne d0, [r9]                        @ store s0-s1/d0 into result pointer

    pop    {r4, r5, r6, r7, r8, r9, r10, r11, pc}               @ restore spill regs
END art_quick_invoke_stub_internal
ldr    ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]  @ get pointer to the code
    blx    ip                              @ call the method

This is exactly the entry point that

SetEntryPointFromQuickCompiledCode();

set earlier. OK, now for the individual cases.

1. If our code comes from an oat file.

Assume the oat_method.LinkMethod(method) path was taken:

void OatFile::OatMethod::LinkMethod(ArtMethod* method) const {
  method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
}
inline const void* OatFile::OatMethod::GetQuickCode() const {
  return GetOatPointer<const void*>(GetCodeOffset());
}
inline uint32_t OatFile::OatMethod::GetCodeOffset() const {
  return (GetQuickCodeSize() == 0) ? 0 : code_offset_;
}

We can see that this ultimately points to the method's code offset inside the oat file, and blx ip (call the method) jumps straight there to execute it.
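
GetOatPointer is basically base-plus-offset arithmetic over the mapped oat file. A hedged miniature (FakeOatMethod and its fields are invented):

#include <cstdint>

// Invented miniature of OatFile::OatMethod::GetQuickCode / GetOatPointer.
struct FakeOatMethod {
  const uint8_t* oat_begin;  // where the oat file is mapped
  uint32_t code_offset;      // offset of this method's compiled code, 0 if none

  const void* GetQuickCode() const {
    return code_offset == 0 ? nullptr : oat_begin + code_offset;
  }
};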

2. Next, let's analyze the quick-to-interpreter case.

Assume GetQuickToInterpreterBridge() was set:

extern "C" void art_quick_to_interpreter_bridge(ArtMethod*);
static inline const void* GetQuickToInterpreterBridge() {
  return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
}
ENTRY art_quick_to_interpreter_bridge
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1, r2
    mov     r1, r9                 @ pass Thread::Current
    mov     r2, sp                 @ pass SP
    blx     artQuickToInterpreterBridge    @ (Method* method, Thread*, SP)
    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
    // Tear down the callee-save frame. Skip arg registers.
    add     sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
    .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    cbnz    r2, 1f                 @ success if no exception is pending
    vmov    d0, r0, r1             @ store into fpr, for when it's a fpr return...
    bx      lr                     @ return on success
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge

artQuickToInterpreterBridge:
inside this function, the method's code is fetched from the dex file and the interpreter frames are set up.

extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {

  JValue tmp_value;
  ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
      StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame, false);
  ManagedStack fragment;

  DCHECK(!method->IsNative()) << PrettyMethod(method);
  uint32_t shorty_len = 0;
  ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
  const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();


  if (deopt_frame != nullptr) {
    // Coming from single-frame deopt.

 
 
    bool from_code = false;
    self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
    CHECK(from_code);

    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);


    interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result);
  

  // Pop transition.
  self->PopManagedStackFragment(fragment);

  // Request a stack deoptimization if needed
  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
    // Push the context of the deoptimization stack so we can restore the return value and the
    // exception before executing the deoptimized frames.
    self->PushDeoptimizationContext(
        result, shorty[0] == 'L', /* from_code */ false, self->GetException());

    // Set special exception to cause deoptimization.
    self->SetException(Thread::GetDeoptimizationException());
  }

  // No need to restore the args since the method has already been run by the interpreter.
  return result.GetJ();
}

Ultimately this takes the CodeItem from the dex file, sets up the managed-stack fragment bookkeeping, and enters the interpreter to execute the method.
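
To picture what "entering the interpreter" means, here is a toy, hedged sketch of a bytecode dispatch loop. The opcodes, ToyFrame and Interpret are all invented; real dex instructions and ART's Execute/ShadowFrame machinery are far richer, but the fetch-decode-execute shape is the same.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Invented opcodes for a toy register machine.
enum Op : uint8_t { OP_CONST, OP_ADD, OP_PRINT, OP_RETURN };

// Stand-in for a shadow frame: just the virtual registers of one method.
struct ToyFrame { std::vector<int32_t> regs; };

// Stand-in for "take the CodeItem and interpret it": a fetch/decode/execute loop.
void Interpret(const std::vector<uint8_t>& code, ToyFrame& frame) {
  size_t pc = 0;
  while (pc < code.size()) {
    switch (code[pc]) {
      case OP_CONST:  // CONST dst, imm
        frame.regs[code[pc + 1]] = code[pc + 2];
        pc += 3;
        break;
      case OP_ADD:    // ADD dst, srcA, srcB
        frame.regs[code[pc + 1]] = frame.regs[code[pc + 2]] + frame.regs[code[pc + 3]];
        pc += 4;
        break;
      case OP_PRINT:  // PRINT src
        std::printf("v%d = %d\n", code[pc + 1], frame.regs[code[pc + 1]]);
        pc += 2;
        break;
      case OP_RETURN:
        return;
    }
  }
}

int main() {
  ToyFrame frame{std::vector<int32_t>(4, 0)};
  std::vector<uint8_t> code = {OP_CONST, 0, 2, OP_CONST, 1, 3,
                               OP_ADD, 2, 0, 1, OP_PRINT, 2, OP_RETURN};
  Interpret(code, frame);  // prints v2 = 5
}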
