C++ Deadlock Detection and Memory Leak Detection

Deadlock Detection
Deadlock

A deadlock occurs when concurrently running threads compete for resources held by each other and none of them can make further progress. Threads or processes that stay in the BLOCKED state for a long time are a common sign of a possible deadlock.

Preventing Deadlocks

	1. Avoid the circular-wait condition: acquire resources in a fixed global order, so that a circular wait can never form

	2. Set a timeout: use a reasonable timeout when trying to acquire a lock, and release the locks already acquired once the timeout expires (see the sketch after this list)

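A minimal C++17 sketch of both techniques (the function and mutex names are illustrative, not from the original article): std::scoped_lock acquires several mutexes atomically with a built-in deadlock-avoidance algorithm, and std::timed_mutex::try_lock_for gives up after a timeout.

#include <chrono>
#include <mutex>

std::mutex m1, m2;        // two resources that must be held together
std::timed_mutex tmutex;

// 1) Fixed/atomic ordering: scoped_lock takes m1 and m2 as one operation,
// so no thread can end up holding one while blocking forever on the other.
void use_both() {
    std::scoped_lock lock(m1, m2);
    // ... operate on both resources ...
}

// 2) Timeout: give up if the lock cannot be taken within 100 ms, so the
// caller can release what it already holds and retry later.
bool try_use() {
    using namespace std::chrono_literals;
    if (!tmutex.try_lock_for(100ms))
        return false; // timed out: back off instead of blocking forever
    // ... critical section ...
    tmutex.unlock();
    return true;
}

On compilers without C++17, std::lock(m1, m2) provides the same all-or-nothing acquisition as std::scoped_lock.
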
Detecting Deadlocks

For a running program or process, external tools can be used to observe the process state and its system calls, and to check whether any thread is stuck on a lock operation and cannot proceed:

gdb debugging:

	1. Use the info threads command to show the state and stack trace of every thread, and locate the threads blocked on a mutex
	2. Use thread thread_id to switch to a thread, then bt or where to inspect its current stack
	3. Look for threads waiting on locks or condition variables; if several threads are each waiting for a resource held by another one, that is a deadlock
(gdb) info thread
  Id   Target Id                                    Frame 
* 1    Thread 0x7ffff7a563c0 (LWP 55512) "deadlock" __futex_abstimed_wait_common64 (private=128, cancel=true, abstime=0x0,
    futex_word=0x7ffff7a52910) at ./nptl/futex-internal.c:57
  2    Thread 0x7ffff7a52640 (LWP 55515) "deadlock" futex_wait (private=0, expected=2, futex_word=0x55555555a1a0 <mutex2>)
    at ../sysdeps/nptl/futex-internal.h:146
  3    Thread 0x7ffff7251640 (LWP 55516) "deadlock" futex_wait (private=0, expected=2, futex_word=0x55555555a1e0 <mutex3>)
    at ../sysdeps/nptl/futex-internal.h:146
  4    Thread 0x7ffff6a50640 (LWP 55517) "deadlock" futex_wait (private=0, expected=2, futex_word=0x55555555a160 <mutex1>)
    at ../sysdeps/nptl/futex-internal.h:146
(gdb) thread 2
[Switching to thread 2 (Thread 0x7ffff7a52640 (LWP 55515))]
#0  futex_wait (private=0, expected=2, futex_word=0x55555555a1a0 <mutex2>) at ../sysdeps/nptl/futex-internal.h:146
146	../sysdeps/nptl/futex-internal.h: No such file or directory.
(gdb) bt
#0  futex_wait (private=0, expected=2, futex_word=0x55555555a1a0 <mutex2>) at ../sysdeps/nptl/futex-internal.h:146
#1  __GI___lll_lock_wait (futex=futex@entry=0x55555555a1a0 <mutex2>, private=0) at ./nptl/lowlevellock.c:49
#2  0x00007ffff7bd6002 in lll_mutex_lock_optimized (mutex=0x55555555a1a0 <mutex2>) at ./nptl/pthread_mutex_lock.c:48
#3  ___pthread_mutex_lock (mutex=0x55555555a1a0 <mutex2>) at ./nptl/pthread_mutex_lock.c:93
#4  0x000055555555583a in __gthread_mutex_lock (__mutex=0x55555555a1a0 <mutex2>) at /usr/include/x86_64-linux-gnu/c++/11/b
#5  0x00005555555559b0 in std::mutex::lock (this=0x55555555a1a0 <mutex2>) at /usr/include/c++/11/bits/std_mutex.h:100
#6  0x0000555555555a2a in std::lock_guard<std::mutex>::lock_guard (this=0x7ffff7a51d68, __m=...) at /usr/include/c++/11/bi
#7  0x00005555555553a2 in FuncA () at deadlock.cpp:14
#8  0x00005555555564cb in std::__invoke_impl<void, void (*)()> (__f=@0x55555556ceb8: 0x555555555334 <FuncA()>) at /usr/inc
#9  0x0000555555556477 in std::__invoke<void (*)()> (__fn=@0x55555556ceb8: 0x555555555334 <FuncA()>) at /usr/include/c++/1
#10 0x0000555555556418 in std::thread::_Invoker<std::tuple<void (*)()> >::_M_invoke<0ul> (this=0x55555556ceb8)
    at /usr/include/c++/11/bits/std_thread.h:259
#11 0x00005555555563e8 in std::thread::_Invoker<std::tuple<void (*)()> >::operator() (this=0x55555556ceb8) at /usr/include
#12 0x00005555555563c8 in std::thread::_State_impl<std::thread::_Invoker<std::tuple<void (*)()> > >::_M_run (this=0x555555
    at /usr/include/c++/11/bits/std_thread.h:211
#13 0x00007ffff7e63253 in ?? () from /lib/x86_64-linux-gnu/libstdc++.so.6
#14 0x00007ffff7bd2ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#15 0x00007ffff7c64850 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81

As shown above, info thread lists all current threads. Threads 2, 3, and 4 are each executing futex_wait, each waiting on a different mutex (mutex1, mutex2, and mutex3, at addresses 0x55555555a160, 0x55555555a1a0, and 0x55555555a1e0 respectively).
After switching to thread 2 and printing its stack, the backtrace shows that the thread, while executing FuncA, deadlocked acquiring mutex2 at line 14 of deadlock.cpp.
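
The trace only names FuncA; the following minimal sketch (FuncB, FuncC and the sleep are assumptions) reconstructs a deadlock.cpp that would produce a backtrace like the one above, with three threads acquiring mutex1, mutex2 and mutex3 in a circular order:

#include <chrono>
#include <mutex>
#include <thread>

std::mutex mutex1, mutex2, mutex3;

// Each function holds one mutex and then blocks on the next one,
// closing the cycle mutex1 -> mutex2 -> mutex3 -> mutex1.
void FuncA() {
    std::lock_guard<std::mutex> a(mutex1);
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    std::lock_guard<std::mutex> b(mutex2); // blocks: mutex2 held by FuncB
}

void FuncB() {
    std::lock_guard<std::mutex> a(mutex2);
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    std::lock_guard<std::mutex> b(mutex3); // blocks: mutex3 held by FuncC
}

void FuncC() {
    std::lock_guard<std::mutex> a(mutex3);
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    std::lock_guard<std::mutex> b(mutex1); // blocks: mutex1 held by FuncA
}

int main() {
    std::thread t1(FuncA), t2(FuncB), t3(FuncC);
    t1.join(); t2.join(); t3.join(); // never returns: main waits in a futex, like thread 1 above
}
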
The pstack command:

	1. pstack prints the stack of every thread in the process; run it a few times with a short pause in between and check whether some threads' stacks never change, and whether those threads are waiting for resources held by one another
	2. Use pstack pid to print the stack information of the specified process

The strace command:

	1. strace is a powerful system-call tracer; it records the system calls made and the signals received by a process as it runs
	2. When using strace to look for deadlocks, pay attention to the futex system call, which is typically used to implement synchronization primitives such as pthread_mutex_lock
	3. Run strace -p pid and examine the output; if many threads repeatedly try and fail to acquire some futex and no thread ever releases it, a deadlock is likely

perf monitoring:

perf is the performance-analysis tool that ships with Linux. It is mainly used to find system performance bottlenecks, but by monitoring thread states and CPU usage it can also hint indirectly at a deadlock, e.g. threads that stay blocked and consume no CPU for long periods.

valgrind + helgrind:

  1. valgrind is a memory-error detection tool; helgrind is one of the tools bundled with valgrind and is used to detect race conditions and deadlocks in multithreaded programs;

  2. helgrind intercepts every lock acquisition and release and checks whether the threads' acquisition orders can form an illegal sequence, thereby finding the programming errors that can lead to deadlock

  3. valgrind --tool=helgrind ./your_program
    

Alternatively, you can hook the lock acquisition and release functions yourself, track which thread holds or waits for which lock, and find the threads and functions involved in a deadlock by searching the resulting wait-for graph for cycles, as the following sample does:

// build: gcc -o deadlock deadlock.c -lpthread -ldl

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>

#if 1
typedef unsigned long int uint64;


#define MAX		100

enum Type {PROCESS, RESOURCE};

struct source_type {

	uint64 id;
	enum Type type;

	uint64 lock_id;
	int degress;
};

struct vertex {

	struct source_type s;
	struct vertex *next;

};

struct task_graph {

	struct vertex list[MAX];
	int num;

	struct source_type locklist[MAX];
	int lockidx; // number of locklist entries in use

	pthread_mutex_t mutex;
};

struct task_graph *tg = NULL;
int path[MAX+1];
int visited[MAX];
int k = 0;
int deadlock = 0;

struct vertex *create_vertex(struct source_type type) {
	struct vertex *tex = (struct vertex *)malloc(sizeof(struct vertex ));
	tex->s = type;
	tex->next = NULL;
	return tex;
}

int search_vertex(struct source_type type) {
	int i = 0;
	for (i = 0;i < tg->num;i ++) {
		if (tg->list[i].s.type == type.type && tg->list[i].s.id == type.id) {
			return i;
		}
	}
	return -1;
}

void add_vertex(struct source_type type) {

	if (search_vertex(type) == -1) {

		tg->list[tg->num].s = type;
		tg->list[tg->num].next = NULL;
		tg->num ++;
	}
}

int add_edge(struct source_type from, struct source_type to) {
	add_vertex(from);
	add_vertex(to);
	struct vertex *v = &(tg->list[search_vertex(from)]);

	while (v->next != NULL) {
		v = v->next;
	}
	v->next = create_vertex(to);

	return 0;
}

int verify_edge(struct source_type i, struct source_type j) {

	if (tg->num == 0) return 0;

	int idx = search_vertex(i);
	if (idx == -1) {
		return 0;
	}

	struct vertex *v = &(tg->list[idx]);
	while (v != NULL) {

		if (v->s.id == j.id) return 1;

		v = v->next;	
	}

	return 0;
}

int remove_edge(struct source_type from, struct source_type to) {

	int idxi = search_vertex(from);
	int idxj = search_vertex(to);

	if (idxi != -1 && idxj != -1) {

		struct vertex *v = &tg->list[idxi];
		struct vertex *remove;

		while (v->next != NULL) {

			if (v->next->s.id == to.id) {

				remove = v->next;
				v->next = v->next->next;

				free(remove);
				break;
			}
			v = v->next;
		}
	}

	return 0;
}


void print_deadlock(void) {
	int i = 0;
	printf("cycle : ");
	for (i = 0;i < k-1;i ++) {
		printf("%ld --> ", tg->list[path[i]].s.id);
	}

	printf("%ld\n", tg->list[path[i]].s.id);
}

// Depth-first search over the wait-for graph; reaching an already-visited
// vertex means the current path contains a cycle, i.e. a deadlock.
int DFS(int idx) {
	struct vertex *ver = &tg->list[idx];
	if (visited[idx] == 1) {

		path[k++] = idx;
		print_deadlock();
		deadlock = 1;
		
		return 0;
	}
	visited[idx] = 1;
	path[k++] = idx;

	while (ver->next != NULL) {

		DFS(search_vertex(ver->next->s));
		k --;
		
		ver = ver->next;
	}
	return 1;
}

int search_for_cycle(int idx) {
	struct vertex *ver = &tg->list[idx];
	visited[idx] = 1;
	k = 0;
	path[k++] = idx;

	while (ver->next != NULL) {
		int i = 0;
		for (i = 0;i < tg->num;i ++) {
			if (i == idx) continue;
			
			visited[i] = 0;
		}
		for (i = 1;i <= MAX;i ++) {
			path[i] = -1;
		}
		k = 1;
		DFS(search_vertex(ver->next->s));
		ver = ver->next;
	}

	return 0;
}

#endif

#if 1
int search_lock(uint64 lock) {
	int i = 0;
	for (i = 0;i < tg->lockidx;i ++) {
		
		if (tg->locklist[i].lock_id == lock) {
			return i;
		}
	}
	return -1;
}

int search_empty_lock(uint64 lock) {
	int i = 0;
	for (i = 0;i < tg->lockidx;i ++) {
		
		if (tg->locklist[i].lock_id == 0) {
			return i;
		}
	}
	return tg->lockidx;
}

// Called before blocking on a mutex: if the lock is already held by
// another thread, add a wait-for edge from this thread to the holder.
void lock_before(uint64_t tid, uint64_t lockaddr) {
	/*
	1. 	if (lockaddr) {
			tid --> lockaddr.tid;
	   	}
	*/

	int idx = 0;

	for (idx = 0;idx < tg->lockidx;idx ++) {

		if (tg->locklist[idx].lock_id == lockaddr) { // the lock is currently held by locklist[idx].id

			struct source_type from;
			from.id = tid;
			from.type = PROCESS;
			add_vertex(from);

			struct source_type to;
			to.id = tg->locklist[idx].id;
			to.type = PROCESS;
			add_vertex(to);

			tg->locklist[idx].degress ++;
			if (!verify_edge(from, to))
				add_edge(from, to);
		}
	}
}

// Called after the mutex has been acquired: record this thread as the
// holder and remove the wait-for edge added in lock_before().
void lock_after(uint64_t tid, uint64_t lockaddr) {
	/*
		if (!lockaddr) {

			tid --> lockaddr;

		} else {

			lockaddr.tid = tid;
			tid -> lockaddr;

		}
		
	 */
	int idx = 0;
	if (-1 == (idx = search_lock(lockaddr))) { // first acquisition of this lock: record its holder
		int eidx = search_empty_lock(lockaddr);

		tg->locklist[eidx].id = tid;
		tg->locklist[eidx].lock_id = lockaddr;

		tg->lockidx ++;
	} else {
		struct source_type from;
		from.id = tid;
		from.type = PROCESS;
		add_vertex(from);

		struct source_type to;
		to.id = tg->locklist[idx].id;
		to.type = PROCESS;
		add_vertex(to);

		tg->locklist[idx].degress --;

		if (verify_edge(from, to))
			remove_edge(from, to);

		tg->locklist[idx].id = tid;
		
	}
}


// Called after the mutex has been released: clear the holder record
// once no other thread is waiting on it.
void unlock_after(uint64_t tid, uint64_t lockaddr) {
	int idx = search_lock(lockaddr);
	if (idx == -1) return; // lock was never recorded
	if (tg->locklist[idx].degress == 0) {
		tg->locklist[idx].id = 0;
		tg->locklist[idx].lock_id = 0;
	}
}


void check_dead_lock(void) {
	int i = 0;
	deadlock = 0;
	for (i = 0;i < tg->num;i ++) {
		if (deadlock == 1) break;
		search_for_cycle(i);
	}
	if (deadlock == 0) {
		printf("no deadlock\n");
	}
}

// Background checker thread: scan the wait-for graph for cycles every 5 s.
static void *thread_routine(void *args) {
	while (1) {
		sleep(5);
		check_dead_lock();
	}
}


void start_check(void) {
	tg = (struct task_graph*)malloc(sizeof(struct task_graph));
	tg->num = 0;
	tg->lockidx = 0;
	pthread_t tid;
	pthread_create(&tid, NULL, thread_routine, NULL);
}


// hook
// define
typedef int (*pthread_mutex_lock_t)(pthread_mutex_t *mutex);
pthread_mutex_lock_t pthread_mutex_lock_f = NULL;

typedef int (*pthread_mutex_unlock_t)(pthread_mutex_t *mutex);
pthread_mutex_unlock_t pthread_mutex_unlock_f = NULL;


// implement
// Hooked pthread_mutex_lock: record the wait-for edge, take the real
// lock, then record this thread as the new holder.
int pthread_mutex_lock(pthread_mutex_t *mutex) {
	pthread_t selfid = pthread_self();

	lock_before((uint64_t)selfid, (uint64_t)mutex);

	int ret = pthread_mutex_lock_f(mutex);

	lock_after((uint64_t)selfid, (uint64_t)mutex);

	return ret;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex) {

	int ret = pthread_mutex_unlock_f(mutex);

	pthread_t selfid = pthread_self();
	unlock_after((uint64_t)selfid, (uint64_t)mutex);

	return ret;
}

// init
void init_hook(void) {
	if (!pthread_mutex_lock_f)
		pthread_mutex_lock_f = (pthread_mutex_lock_t)dlsym(RTLD_NEXT, "pthread_mutex_lock");

	if (!pthread_mutex_unlock_f)
		pthread_mutex_unlock_f = (pthread_mutex_unlock_t)dlsym(RTLD_NEXT, "pthread_mutex_unlock");
}

#endif

#if 1 //sample

pthread_mutex_t r1 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t r2 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t r3 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t r4 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t r5 = PTHREAD_MUTEX_INITIALIZER;

void *t1_cb(void *arg) {

	printf("t1: %ld\n", pthread_self());

	pthread_mutex_lock(&r1);
	sleep(1);
	pthread_mutex_lock(&r2);

	pthread_mutex_unlock(&r2);

	pthread_mutex_unlock(&r1);

	return NULL;
}

void *t2_cb(void *arg) {

	printf("t2: %ld\n", pthread_self());

	pthread_mutex_lock(&r2);
	sleep(1);
	pthread_mutex_lock(&r3);

	pthread_mutex_unlock(&r3);
	pthread_mutex_unlock(&r2);

	return NULL;
}

void *t3_cb(void *arg) {

	printf("t3: %ld\n", pthread_self());

	pthread_mutex_lock(&r3);
	sleep(1);
	pthread_mutex_lock(&r4);

	pthread_mutex_unlock(&r4);
	pthread_mutex_unlock(&r3);

	return NULL;
}

void *t4_cb(void *arg) {

	printf("t4: %ld\n", pthread_self());

	pthread_mutex_lock(&r4);
	sleep(1);
	pthread_mutex_lock(&r5);

	pthread_mutex_unlock(&r5);
	pthread_mutex_unlock(&r4);

	return NULL;
}

void *t5_cb(void *arg) {

	printf("t5: %ld\n", pthread_self());

	// take r5 before r1 so that t1..t5 form the wait cycle
	// r1 -> r2 -> r3 -> r4 -> r5 -> r1 that the checker should report
	pthread_mutex_lock(&r5);
	sleep(1);
	pthread_mutex_lock(&r1);

	pthread_mutex_unlock(&r1);
	pthread_mutex_unlock(&r5);

	return NULL;
}


int main() {
	init_hook();
	start_check();

	pthread_t t1, t2, t3, t4, t5;

	pthread_create(&t1, NULL, t1_cb, NULL);
	pthread_create(&t2, NULL, t2_cb, NULL);
	
	pthread_create(&t3, NULL, t3_cb, NULL);
	pthread_create(&t4, NULL, t4_cb, NULL);
	pthread_create(&t5, NULL, t5_cb, NULL);
    
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	pthread_join(t3, NULL);
	pthread_join(t4, NULL);
	pthread_join(t5, NULL);
	printf("complete\n");
}
#endif
Memory Leaks

A memory leak is caused by heap memory that is allocated but never freed. It can be detected with the following methods:

  1. The Valgrind tool:

    valgrind is a tool for detecting memory errors, covering memory leaks and many other kinds of memory problems;

    valgrind --tool=memcheck --leak-check=yes your_program
    

    valgrind reports every block of memory that was never freed, together with the place where it was allocated;
    for detailed usage see https://blog.csdn.net/xiaofeilongyu/article/details/128538777?spm=1001.2014.3001.5502

  2. AddressSanitizer (ASan)

    AddressSanitizer is a dynamic memory-error detector provided by the gcc and clang compilers; it requires recompiling the source:

    g++ -fsanitize=address -g your_program.cpp -o your_program
    ./your_program
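
    A minimal leaky program to try this on (illustrative, not from the original article); with -fsanitize=address, the leak checker prints a report with the allocation stack when the process exits:

    // leak.cpp -- build: g++ -fsanitize=address -g leak.cpp -o leak
    #include <cstdlib>

    int main() {
        void *p = std::malloc(32); // allocated but never freed
        (void)p;
        return 0;                  // the 32-byte leak is reported at exit
    }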
    
  3. Hook the malloc/free allocation and deallocation functions to track memory usage and pinpoint leaks, as in the sample below

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <link.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    
    int flag = 1;
    
    #if 0
    // Variant 1: macro-based wrappers that record each allocation in a file
    void *nMalloc(size_t size, const char *filename, int line) {
    	void *p = malloc(size);
    	if (flag) {
    		char buff[128] = {0};
    		snprintf(buff, 128, "./mem/%p.mem", p);
    		FILE *fp = fopen(buff, "w");
    		if (!fp) {
    			free(p);
    			return NULL;
    		}
    		fprintf(fp, "[+]%s:%d, addr: %p, size: %ld\n", filename, line, p, size);
    		fflush(fp);
    		fclose(fp);
    	}
    	//printf("nMalloc: %p, size: %ld\n", p, size);
    	return p;
    }
    
    void nFree(void *ptr) {
    	//printf("nFree: %p, \n", ptr);
    	if (flag) {	
    		char buff[128] = {0};
    		snprintf(buff, 128, "./mem/%p.mem", ptr);
    		if (unlink(buff) < 0) {
    			printf("double free: %p", ptr);
    			return ;
    		}
    	}
    	return free(ptr);
    }
    
    #define malloc(size) nMalloc(size, __FILE__, __LINE__)
    #define free(ptr) nFree(ptr)
    
    #else
    // Variant 2: hook malloc/free at run time via dlsym(RTLD_NEXT)
    typedef void *(*malloc_t)(size_t size);
    malloc_t malloc_f = NULL;
    typedef void (*free_t)(void *ptr);
    free_t free_f = NULL;
    
    int enable_malloc = 1; // re-entrancy guards: fopen/fprintf themselves call
    int enable_free = 1;   // malloc/free, and those calls must not be recorded
    
    // Convert a runtime address into its static offset inside the ELF file,
    // so that addr2line can map it back to a source line.
    void *ConvertToELF(void *addr) {
    	Dl_info info;
    	struct link_map *link;
    	dladdr1(addr, &info, (void **)&link, RTLD_DL_LINKMAP);
    	return (void *)((size_t)addr - link->l_addr);
    }
    // main --> f1 --> f2 --> malloc
    void *malloc(size_t size) {
    	void *p = NULL;
    	if (enable_malloc) {
    		enable_malloc = 0;
    
    		p = malloc_f(size);
    		// Get the address this call returns to: argument 0 is the direct
    		// caller, 1 the caller's caller, and so on.
    		void *caller = __builtin_return_address(0);
    		char buff[128] = {0};
    		snprintf(buff, 128, "./mem/%p.mem", p);
    		FILE *fp = fopen(buff, "w");
    		if (!fp) {
    			free(p);
    			return NULL;
    		}
    
    		// fprintf(fp, "[+]%p, addr: %p, size: %ld\n", caller, p, size);
    		fprintf(fp, "[+]%p, addr: %p, size: %ld\n", ConvertToELF(caller), p, size);
    		fclose(fp);
    		enable_malloc = 1;
    	} else {
    		p = malloc_f(size);
    	}
    	return p;
    }
    
    // addr2line 
    void free(void *ptr) {
    
    	if (enable_free) {
    		enable_free = 0;
    
    		char buff[128] = {0};
    		snprintf(buff, 128, "./mem/%p.mem", ptr);
    
    		if (unlink(buff) < 0) {
    			// no record file: untracked allocation or a genuine double free
    			printf("double free: %p\n", ptr);
    			enable_free = 1;
    			return ;
    		}
    
    		free_f(ptr);
    
    		enable_free = 1;
    	} else {
    		free_f(ptr);
    	}
    
    	return ;
    }
    
    void init_hook(void) {
    	if (!malloc_f) {
    		// RTLD_NEXT: resolve the next occurrence of the symbol in the
    		// search order, i.e. the real libc malloc
    		malloc_f = (malloc_t)dlsym(RTLD_NEXT, "malloc");
    	}
    	if (!free_f) {
    		free_f = (free_t)dlsym(RTLD_NEXT, "free");
    	}
    }
    #endif
    
    // __FILE__, __LINE__, __func__
    int main() {
    	init_hook(); // must run before the first tracked allocation
    	// note: the ./mem directory must already exist
    	void *p1 = malloc(5);
    	void *p2 = malloc(10); // intentionally never freed: the leak to catch
    	void *p3 = malloc(35);
    	void *p4 = malloc(10);
    
    	free(p1);
    	free(p3);
    	free(p4);
    	getchar();
    } 
    

    By overriding malloc/free with hook functions, every malloc creates a .mem file and every free deletes the file that was created when the memory was allocated. If files remain in the mem directory after the program finishes, memory has leaked, and the contents of those files show exactly where:

    cxl@cxl:~/code/memleak$ cat ./mem/0x5557eff714b0.mem 
    [+]0x16c8, addr: 0x5557eff714b0, size: 10
    

    This record shows a 10-byte leak: the leaked block is at address 0x5557eff714b0, and it was allocated by the code at ELF offset 0x16c8.

    The addr2line command can then map that offset to the exact function and source location, as shown below:

    cxl@cxl:~/code/memleak$ addr2line -f -e ./memleak -a 0x16c8
    0x00000000000016c8
    main
    /home/cxl/code/3.2.6-memleak/memleak.c:176
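
    The leftover records can also be collected programmatically. A minimal C++17 sketch (a hypothetical helper, not part of the original sample) that prints every record remaining in ./mem after the program under test exits:

    // scan_mem.cpp -- build: g++ -std=c++17 scan_mem.cpp -o scan_mem
    #include <filesystem>
    #include <fstream>
    #include <iostream>
    #include <string>
    #include <system_error>

    int main() {
        std::error_code ec;
        // Every file still present in ./mem is one leaked allocation.
        for (const auto &entry : std::filesystem::directory_iterator("./mem", ec)) {
            if (entry.path().extension() != ".mem") continue;
            std::ifstream in(entry.path());
            std::string line;
            while (std::getline(in, line))
                std::cout << "leak: " << line << '\n';
        }
        if (ec)
            std::cerr << "cannot open ./mem: " << ec.message() << '\n';
        return 0;
    }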
    
    

For memory-leak detection, the tool I currently find most convenient is valgrind.
