On the ART side, the dump triggered by SIGQUIT is produced by a dedicated runtime thread named "Signal Catcher"; sending SIGQUIT (signal 3, e.g. kill -3 <pid>) to an app process makes this thread write the ANR trace. For reference, the signal-number-to-name mapping:
"SIG_0", /* 0 */ "SIGHUP", /* 1 */ "SIGINT", /* 2 */ "SIGQUIT", /* 3 */ "SIGILL", /* 4 */ "SIGTRAP", /* 5 */ "SIGABRT", /* 6 */ "SIGBUS", /* 7 */ "SIGFPE", /* 8 */ "SIGKILL", /* 9 */ "SIGUSR1", /* 10 */ "SIGSEGV", /* 11 */ "SIGUSR2", /* 12 */ "SIGPIPE", /* 13 */ "SIGALRM", /* 14 */ "SIGTERM", /* 15 */ "SIGSTKFLT", /* 16 */ "SIGCHLD", /* 17 */ "SIGCONT", /* 18 */ "SIGSTOP", /* 19 */ "SIGTSTP", /* 20 */ "SIGTTIN", /* 21 */ "SIGTTOU", /* 22 */ "SIGURG", /* 23 */ "SIGXCPU", /* 24 */ "SIGXFSZ", /* 25 */ "SIGVTALRM", /* 26 */ "SIGPROF", /* 27 */ "SIGWINCH", /* 28 */ "SIGIO", /* 29 */ "SIGPWR", /* 30 */ "SIGSYS", /* 31 */ "SIGRTMIN", /* 32 */
A ps -t listing of an app process shows this thread blocked in sigtimedwait (the wchan column reads do_sigtime), waiting for the signal to arrive:
root@scorpio:/data/anr # ps -t | grep 4501
system 4501 667 1608988 64804 SyS_epoll_ 7fac377434 S com.securitycore
system 4507 4501 1608988 64804 do_sigtime 7fac377614 S Signal Catcher
system 4508 4501 1608988 64804 unix_strea 7fac377f8c S JDWP
system 4509 4501 1608988 64804 futex_wait 7fac32b038 S ReferenceQueueD
system 4510 4501 1608988 64804 futex_wait 7fac32b038 S FinalizerDaemon
system 4511 4501 1608988 64804 futex_wait 7fac32b038 S FinalizerWatchd
system 4513 4501 1608988 64804 futex_wait 7fac32b038 S HeapTaskDaemon
system 4514 4501 1608988 64804 binder_thr 7fac377524 S Binder_1
system 4515 4501 1608988 64804 binder_thr 7fac377524 S Binder_2
system 4517 4501 1608988 64804 SyS_epoll_ 7fac377434 S local_job_dispa
system 4519 4501 1608988 64804 SyS_epoll_ 7fac377434 S remote_job_disp
system 4520 4501 1608988 64804 SyS_epoll_ 7fac377434 S BindServiceThre
system 4522 4501 1608988 64804 futex_wait 7fac32b038 S pool-2-thread-1
system 4537 4501 1608988 64804 futex_wait 7fac32b038 S Okio Watchdog
system 4539 4501 1608988 64804 futex_wait 7fac32b038 S pool-3-thread-1
system 6989 4501 1608988 64804 SyS_epoll_ 7fac377434 S WifiManager
The Signal Catcher thread is created and run in art/runtime/signal_catcher.cc:
SignalCatcher::SignalCatcher(const std::string& stack_trace_file)
: stack_trace_file_(stack_trace_file),
lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
thread_(nullptr) {
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
CHECK_PTHREAD_CALL(pthread_create, (&pthread_, nullptr, &Run, this), "signal catcher thread");
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
while (thread_ == nullptr) {
cond_.Wait(self);
}
}
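The constructor is a textbook start-up handshake: it spawns a raw pthread and then blocks on a condition variable until the new thread has attached to the runtime and published itself through thread_. A minimal standalone sketch of the same pattern, using std::thread and std::condition_variable instead of ART's Mutex/ConditionVariable wrappers (the Worker name and its members are illustrative, not ART's):

#include <condition_variable>
#include <mutex>
#include <thread>

class Worker {
 public:
  Worker() {
    thread_ = std::thread([this] {
      // ... the real code attaches the thread to the runtime here ...
      {
        std::lock_guard<std::mutex> lock(mutex_);
        ready_ = true;                 // corresponds to "signal_catcher->thread_ = self"
      }
      cond_.notify_all();              // corresponds to cond_.Broadcast(self)
      // ... the worker's main loop would start here ...
    });
    // Like SignalCatcher's constructor: block until the worker reports readiness.
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return ready_; });
  }
  ~Worker() { thread_.join(); }

 private:
  std::thread thread_;
  std::mutex mutex_;
  std::condition_variable cond_;
  bool ready_ = false;
};

int main() { Worker w; }  // the constructor returns only after the handshake completes

By the time the constructor returns, the catcher thread is guaranteed to be attached to the runtime.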
void* SignalCatcher::Run(void* arg) {
SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
CHECK(signal_catcher != nullptr);
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
!runtime->IsAotCompiler()));
Thread* self = Thread::Current();
DCHECK_NE(self->GetState(), kRunnable);
{
MutexLock mu(self, signal_catcher->lock_);
signal_catcher->thread_ = self;
signal_catcher->cond_.Broadcast(self);
}
// Set up mask with signals we want to handle.
SignalSet signals;
signals.Add(SIGQUIT);
signals.Add(SIGUSR1);
while (true) {
int signal_number = signal_catcher->WaitForSignal(self, signals);
if (signal_catcher->ShouldHalt()) {
runtime->DetachCurrentThread();
return nullptr;
}
switch (signal_number) {
case SIGQUIT:
signal_catcher->HandleSigQuit();
break;
case SIGUSR1:
signal_catcher->HandleSigUsr1();
break;
default:
LOG(ERROR) << "Unexpected signal %d" << signal_number;
break;
}
}
}
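WaitForSignal() (not shown above) blocks in sigwait()/sigtimedwait() on the mask built from SIGQUIT and SIGUSR1, which is why the ps listing shows the Signal Catcher thread parked in do_sigtime. A minimal sketch of the same dedicated-signal-thread technique in plain POSIX C++ (the function names and messages are illustrative, not ART's):

#include <csignal>
#include <cstdio>
#include <pthread.h>

// Block the signals in every thread, then let one thread pull them
// synchronously with sigwait(), as SignalCatcher::WaitForSignal() does.
static void* signal_catcher(void* arg) {
  sigset_t* set = static_cast<sigset_t*>(arg);
  for (;;) {
    int signo = 0;
    if (sigwait(set, &signo) != 0) continue;      // wait for the next queued signal
    if (signo == SIGQUIT) {
      std::printf("SIGQUIT: dump all threads\n"); // ART calls HandleSigQuit()
    } else if (signo == SIGUSR1) {
      std::printf("SIGUSR1: force a GC\n");       // ART calls HandleSigUsr1()
    }
  }
  return nullptr;
}

int main() {
  static sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGQUIT);
  sigaddset(&set, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &set, nullptr);  // must be blocked so sigwait() receives them
  pthread_t tid;
  pthread_create(&tid, nullptr, signal_catcher, &set);
  pthread_join(tid, nullptr);                 // a real process keeps doing its work here
  return 0;
}

Because the signals are consumed synchronously, the expensive dumping work runs in an ordinary thread context instead of inside an async-signal handler.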
The top-level framework that produces the trace file: HandleSigQuit() assembles the whole dump into one string stream and then writes it out via Output():
void SignalCatcher::HandleSigQuit() {
Runtime* runtime = Runtime::Current();
std::ostringstream os;
os << "\n"
<< "----- pid " << getpid() << " at " << GetIsoDate() << " -----\n";
DumpCmdLine(os);
// Note: The strings "Build fingerprint:" and "ABI:" are chosen to match the format used by
// debuggerd. This allows, for example, the stack tool to work.
std::string fingerprint = runtime->GetFingerprint();
os << "Build fingerprint: '" << (fingerprint.empty() ? "unknown" : fingerprint) << "'\n";
os << "ABI: '" << GetInstructionSetString(runtime->GetInstructionSet()) << "'\n";
os << "Build type: " << (kIsDebugBuild ? "debug" : "optimized") << "\n";
runtime->DumpForSigQuit(os);
if ((false)) {  // deliberately compiled out; flip to true locally to include /proc/self/maps in the dump
std::string maps;
if (ReadFileToString("/proc/self/maps", &maps)) {
os << "/proc/self/maps:\n" << maps;
}
}
os << "----- end " << getpid() << " -----\n";
Output(os.str());
}
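With the pieces above, the head and tail of a trace therefore look roughly like this (values are placeholders, not copied from a real dump):

----- pid 4501 at 2017-01-01 12:00:00 -----
Cmd line: com.securitycore
Build fingerprint: '...'
ABI: 'arm64'
Build type: optimized
... subsystem sections produced by runtime->DumpForSigQuit(os) ...
----- end 4501 -----

The bulk of the content comes from runtime->DumpForSigQuit(os), which fans out to each subsystem in turn: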
void Runtime::DumpForSigQuit(std::ostream& os) {
GetClassLinker()->DumpForSigQuit(os);
GetInternTable()->DumpForSigQuit(os);
GetJavaVM()->DumpForSigQuit(os);
GetHeap()->DumpForSigQuit(os);
TrackedAllocators::Dump(os);
os << "\n";
thread_list_->DumpForSigQuit(os);
BaseMutex::DumpAll(os);
}
void ClassLinker::DumpForSigQuit(std::ostream& os) {
Thread* self = Thread::Current();
if (dex_cache_image_class_lookup_required_) {
ScopedObjectAccess soa(self);
MoveImageClassesToClassTable();
}
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes="
<< class_table_.Size() << "\n";
}
void InternTable::DumpForSigQuit(std::ostream& os) const {
os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}
void JavaVMExt::DumpForSigQuit(std::ostream& os) {
os << "JNI: CheckJNI is " << (check_jni_ ? "on" : "off");
if (force_copy_) {
os << " (with forcecopy)";
}
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, globals_lock_);
os << "; globals=" << globals_.Capacity();
}
{
MutexLock mu(self, weak_globals_lock_);
if (weak_globals_.Capacity() > 0) {
os << " (plus " << weak_globals_.Capacity() << " weak)";
}
}
os << '\n';
{
MutexLock mu(self, *Locks::jni_libraries_lock_);
os << "Libraries: " << Dumpable<Libraries>(*libraries_) << " (" << libraries_->size() << ")\n";
}
}
void Heap::DumpForSigQuit(std::ostream& os) {
os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
<< PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
DumpGcPerformanceInfo(os);
}
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative timings.
os << "Dumping cumulative Gc timings\n";
uint64_t total_duration = 0;
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (auto& collector : garbage_collectors_) {
total_duration += collector->GetCumulativeTimings().GetTotalNs();
total_paused_time += collector->GetTotalPausedTimeNs();
collector->DumpPerformanceInfo(os);
}
uint64_t allocation_time =
static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
if (total_duration != 0) {
const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
os << "Mean GC size throughput: "
<< PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
os << "Mean GC object throughput: "
<< (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
}
uint64_t total_objects_allocated = GetObjectsAllocatedEver();
os << "Total number of allocations " << total_objects_allocated << "\n";
os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
if (kMeasureAllocationTime) {
os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
<< "\n";
}
if (HasZygoteSpace()) {
os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
os << "Total GC count: " << GetGcCount() << "\n";
os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
{
MutexLock mu(Thread::Current(), *gc_complete_lock_);
if (gc_count_rate_histogram_.SampleSize() > 0U) {
os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
gc_count_rate_histogram_.DumpBins(os);
os << "\n";
}
if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
os << "Histogram of blocking GC count per "
<< NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
blocking_gc_count_rate_histogram_.DumpBins(os);
os << "\n";
}
}
BaseMutex::DumpAll(os);
}
void BaseMutex::DumpAll(std::ostream& os) {
if (kLogLockContentions) {
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
if (all_mutexes == nullptr) {
// No mutexes have been created yet at startup.
return;
}
typedef std::set<BaseMutex*>::const_iterator It;
os << "(Contended)\n";
for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
BaseMutex* mutex = *it;
if (mutex->HasEverContended()) {
mutex->Dump(os);
os << "\n";
}
}
os << "(Never contented)\n";
for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
BaseMutex* mutex = *it;
if (!mutex->HasEverContended()) {
mutex->Dump(os);
os << "\n";
}
}
}
}
void Dump(std::ostream& os) {
if (kEnableTrackingAllocator) {
os << "Dumping native memory usage\n";
for (size_t i = 0; i < kAllocatorTagCount; ++i) {
uint64_t bytes_used = g_bytes_used[i].LoadRelaxed();
uint64_t max_bytes_used = g_max_bytes_used[i];
uint64_t total_bytes_used = g_total_bytes_used[i].LoadRelaxed();
if (total_bytes_used != 0) {
os << static_cast<AllocatorTag>(i) << " active=" << bytes_used << " max="
<< max_bytes_used << " total=" << total_bytes_used << "\n";
}
}
}
}
void ThreadList::DumpForSigQuit(std::ostream& os) {
{
ScopedObjectAccess soa(Thread::Current());
// Only print if we have samples.
if (suspend_all_historam_.SampleSize() > 0) {
Histogram<uint64_t>::CumulativeData data;
suspend_all_historam_.CreateHistogram(&data);
suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data); // Dump time to suspend.
}
}
Dump(os);
DumpUnattachedThreads(os);
}
void ThreadList::Dump(std::ostream& os) {
{
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
os << "DALVIK THREADS (" << list_.size() << "):\n";
}
DumpCheckpoint checkpoint(&os);
size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
if (threads_running_checkpoint != 0) {
checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
}
}
ThreadList::RunCheckpoint(Closure* checkpoint_function) asks every attached thread to run the closure: runnable threads execute checkpoint_function->Run(self) themselves at their next checkpoint, while threads that are already suspended have it run on their behalf. The closure used here is DumpCheckpoint, whose Run() performs the per-thread dump:
void Run(Thread* thread) OVERRIDE {
// Note thread and self may not be equal if thread was already suspended at the point of the
// request.
Thread* self = Thread::Current();
std::ostringstream local_os;
{
ScopedObjectAccess soa(self);
thread->Dump(local_os);
}
local_os << "\n";
{
// Use the logging lock to ensure serialization when writing to the common ostream.
MutexLock mu(self, *Locks::logging_lock_);
*os_ << local_os.str();
}
if (thread->GetState() == kRunnable) {
barrier_.Pass(self);
}
}
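The design choice worth noting: each thread's dump is first formatted into a private ostringstream and only then appended to the shared stream under Locks::logging_lock_, so the per-thread sections never interleave. A condensed sketch of that idea, independent of ART's types (g_logging_lock and DumpThreadTo are illustrative names):

#include <mutex>
#include <ostream>
#include <sstream>
#include <string>

std::mutex g_logging_lock;

void DumpThreadTo(std::ostream& shared_os, const std::string& thread_dump) {
  std::ostringstream local_os;
  local_os << thread_dump << "\n";             // expensive formatting, no lock held
  std::lock_guard<std::mutex> lock(g_logging_lock);
  shared_os << local_os.str();                 // short critical section: one append
}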
void Thread::Dump(std::ostream& os) const {
DumpState(os);
DumpStack(os);
}
void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
std::string group_name;
int priority;
bool is_daemon = false;
Thread* self = Thread::Current();
// If flip_function is not null, it means we have run a checkpoint
// before the thread wakes up to execute the flip function and the
// thread roots haven't been forwarded. So the following access to
// the roots (opeer or methods in the frames) would be bad. Run it
// here. TODO: clean up.
if (thread != nullptr) {
ScopedObjectAccessUnchecked soa(self);
Thread* this_thread = const_cast<Thread*>(thread);
Closure* flip_func = this_thread->GetFlipFunction();
if (flip_func != nullptr) {
flip_func->Run(this_thread);
}
}
// Don't do this if we are aborting since the GC may have all the threads suspended. This will
// cause ScopedObjectAccessUnchecked to deadlock.
if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
ScopedObjectAccessUnchecked soa(self);
priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
->GetInt(thread->tlsPtr_.opeer);
is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
->GetBoolean(thread->tlsPtr_.opeer);
mirror::Object* thread_group =
soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
if (thread_group != nullptr) {
ArtField* group_name_field =
soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
mirror::String* group_name_string =
reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
}
} else {
priority = GetNativePriority();
}
std::string scheduler_group_name(GetSchedulerGroupName(tid));
if (scheduler_group_name.empty()) {
scheduler_group_name = "default";
}
if (thread != nullptr) {
os << '"' << *thread->tlsPtr_.name << '"';
if (is_daemon) {
os << " daemon";
}
os << " prio=" << priority
<< " tid=" << thread->GetThreadId()
<< " " << thread->GetState();
if (thread->IsStillStarting()) {
os << " (still starting up)";
}
os << "\n";
} else {
os << '"' << ::art::GetThreadName(tid) << '"'
<< " prio=" << priority
<< " (not attached)\n";
}
if (thread != nullptr) {
MutexLock mu(self, *Locks::thread_suspend_count_lock_);
os << " | group=\"" << group_name << "\""
<< " sCount=" << thread->tls32_.suspend_count
<< " dsCount=" << thread->tls32_.debug_suspend_count
<< " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
<< " self=" << reinterpret_cast<const void*>(thread) << "\n";
}
os << " | sysTid=" << tid
<< " nice=" << getpriority(PRIO_PROCESS, tid)
<< " cgrp=" << scheduler_group_name;
if (thread != nullptr) {
int policy;
sched_param sp;
CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
__FUNCTION__);
os << " sched=" << policy << "/" << sp.sched_priority
<< " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
}
os << "\n";
// Grab the scheduler stats for this thread.
std::string scheduler_stats;
if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'.
} else {
scheduler_stats = "0 0 0";
}
char native_thread_state = '?';
int utime = 0;
int stime = 0;
int task_cpu = 0;
GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
os << " | state=" << native_thread_state
<< " schedstat=( " << scheduler_stats << " )"
<< " utm=" << utime
<< " stm=" << stime
<< " core=" << task_cpu
<< " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
if (thread != nullptr) {
os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
<< reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
<< PrettySize(thread->tlsPtr_.stack_size) << "\n";
// Dump the held mutexes.
os << " | held mutexes=";
for (size_t i = 0; i < kLockLevelCount; ++i) {
if (i != kMonitorLock) {
BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
if (mutex != nullptr) {
os << " \"" << mutex->GetName() << "\"";
if (mutex->IsReaderWriterMutex()) {
ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
os << "(exclusive held)";
} else {
os << "(shared held)";
}
}
}
}
}
os << "\n";
}
}
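Putting DumpState() together, each thread's header block in the trace looks roughly like the following (the values are illustrative placeholders, not taken from a real dump):

"Signal Catcher" daemon prio=5 tid=3 Runnable
  | group="system" sCount=0 dsCount=0 obj=0x... self=0x...
  | sysTid=4507 nice=0 cgrp=default sched=0/0 handle=0x...
  | state=R schedstat=( 0 0 0 ) utm=0 stm=0 core=1 HZ=100
  | stack=0x...-0x... stackSize=1037KB
  | held mutexes= "mutator lock"(shared held)

The kernel, native and Java stacks that follow each header come from DumpStack(), shown next: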
void Thread::DumpStack(std::ostream& os) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
bool dump_for_abort = (gAborting > 0);
bool safe_to_dump = (this == Thread::Current() || IsSuspended());
if (!kIsDebugBuild) {
// We always want to dump the stack for an abort, however, there is no point dumping another
// thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
safe_to_dump = (safe_to_dump || dump_for_abort);
}
if (safe_to_dump) {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
}
DumpJavaStack(os);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
}