iOS Multithreading: Exploring GCD Internals, Part 2 (dispatch_async and dispatch_sync Source)

The setup work and the libdispatch source download are covered in the previous article.

The asynchronous function: dispatch_async

Flowchart of the dispatch_async analysis

[image: flowchart of the dispatch_async call path]

Look at the source of dispatch_async:

// dq is the queue, work is the task
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
	// temporary continuation that will package the task
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DC_FLAG_CONSUME; // set the flag bits
	dispatch_qos_t qos;

	// wrap the task: copy and store the block and pick its invoke function
	qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
	_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

_dispatch_continuation_init is only an initializer: it mainly saves the block's context and assigns the function that will execute the block.

dispatch_async itself is short, so there are two things to explore next:

  • the point at which the child thread is created
  • the point at which the task block is executed

When is the child thread created?

1. Step into _dispatch_continuation_init

static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, dispatch_block_t work,
		dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	// copy the block; it will be stored in dc_ctxt
	void *ctxt = _dispatch_Block_copy(work); // copy the task

	dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
	if (unlikely(_dispatch_block_has_private_data(work))) {
		dc->dc_flags = dc_flags;
		dc->dc_ctxt = ctxt; // store the copied block
		// will initialize all fields but requires dc_flags & dc_ctxt to be set
		return _dispatch_continuation_init_slow(dc, dqu, flags);
	}

	dispatch_function_t func = _dispatch_Block_invoke(work); // the block's invoke function pointer
	if (dc_flags & DC_FLAG_CONSUME) {
		func = _dispatch_call_block_and_release; // dispatch_async sets DC_FLAG_CONSUME, so the callback also releases the block
	}
	return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}

_dispatch_call_block_and_release simply executes the block, so once dc->dc_func is invoked the block runs directly.

void
_dispatch_call_block_and_release(void *block)
{
	void (^b)(void) = block;
	b();
	Block_release(b);
}

_dispatch_continuation_init_f stores the callback: the parameter f is the func from above, and it is saved into the continuation's fields.

[image: _dispatch_continuation_init_f source]
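Since the screenshot is missing, here is a rough, simplified sketch of what _dispatch_continuation_init_f does, based on the libdispatch sources (exact fields and helpers can differ between versions): it stores func and the copied block into the continuation.

static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
		dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
		dispatch_block_flags_t flags, uintptr_t dc_flags)
{
	pthread_priority_t pp = 0;
	dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
	dc->dc_func  = f;    // the callback chosen above, e.g. _dispatch_call_block_and_release
	dc->dc_ctxt  = ctxt; // the copied block
	// the voucher and the priority are also recorded on the continuation
	_dispatch_continuation_voucher_set(dc, flags);
	return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}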

  • Step into _dispatch_continuation_voucher_set

[image: _dispatch_continuation_voucher_set source]

  • _dispatch_continuation_priority_set --> sets dc->dc_priority

[image: _dispatch_continuation_priority_set source]

There is no thread-creation code inside _dispatch_continuation_init, so next we look in _dispatch_continuation_async.

2. Search for _dispatch_continuation_async

_dispatch_continuation_async(dispatch_queue_class_t dqu,
		dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
	// tracing only: record the push when introspection is enabled
#if DISPATCH_INTROSPECTION
	if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
		_dispatch_trace_item_push(dqu, dc);
		// trace = record the enqueue for the introspection tools
	}
#else
	(void)dc_flags;
#endif
	return dx_push(dqu._dq, dc, qos); // dx_push is a macro
}

The macro is #define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z); we only care about dq_push.

3. The return value leads us to dx_push. Search the project globally for dx_push

#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
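dx_vtable is just a lookup of the object's class vtable (shown here roughly as it appears in the libdispatch headers; treat it as a sketch), which is why dq_push resolves to a different function for each queue class:

#define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable)
// so dx_push(x, y, z) expands to: look up x's class vtable and call its dq_push slot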

4. Search globally for dq_push

DISPATCH_VTABLE_INSTANCE(queue,
	// This is the base class for queues, no objects of this type are made
	.do_type        = _DISPATCH_QUEUE_CLUSTER,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
);



DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
	.do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);

Depending on the queue type, dq_push is bound to a different implementation. For example:
concurrent queue: .dq_push = _dispatch_lane_concurrent_push
serial queue:     .dq_push = _dispatch_lane_push
(the small usage sketch below shows which creation attribute leads to which path)
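For reference, a small usage sketch (queue labels and variable names are arbitrary) showing which creation attribute routes to which dq_push implementation:

// concurrent queue -> dq_push is _dispatch_lane_concurrent_push
dispatch_queue_t concurrentQ = dispatch_queue_create("com.demo.concurrent", DISPATCH_QUEUE_CONCURRENT);
// serial queue -> dq_push is _dispatch_lane_push
dispatch_queue_t serialQ = dispatch_queue_create("com.demo.serial", DISPATCH_QUEUE_SERIAL);

dispatch_async(concurrentQ, ^{ NSLog(@"pushed via _dispatch_lane_concurrent_push"); });
dispatch_async(serialQ,     ^{ NSLog(@"pushed via _dispatch_lane_push"); });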

5. For the concurrent queue, step into _dispatch_lane_concurrent_push and look at its source

_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
		dispatch_qos_t qos)
{
	if (dq->dq_items_tail == NULL &&
			!_dispatch_object_is_waiter(dou) &&
			!_dispatch_object_is_barrier(dou) &&
			_dispatch_queue_try_acquire_async(dq)) {
		// the non-barrier case goes through _dispatch_continuation_redirect_push
		return _dispatch_continuation_redirect_push(dq, dou, qos);
	}
    // everything else (barriers, waiters, or a busy queue) goes here
	_dispatch_lane_push(dq, dou, qos);
}

A plain dispatch_async is not a barrier, so we follow _dispatch_continuation_redirect_push.

6. Search globally for _dispatch_continuation_redirect_push

_dispatch_continuation_redirect_push(dispatch_lane_t dl,
		dispatch_object_t dou, dispatch_qos_t qos)
{
	if (likely(!_dispatch_object_is_redirection(dou))) {
		dou._dc = _dispatch_async_redirect_wrap(dl, dou);
	} else if (!dou._dc->dc_ctxt) {
		// find first queue in descending target queue order that has
		// an autorelease frequency set, and use that as the frequency for
		// this continuation.
		dou._dc->dc_ctxt = (void *)
		(uintptr_t)_dispatch_queue_autorelease_frequency(dl);
	}

	dispatch_queue_t dq = dl->do_targetq;
	if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
	dx_push(dq, dou, qos); // dx_push again, but this time dq is the queue's do_targetq
	// this works because GCD queues are objects too, with a class / superclass / root-class style hierarchy
}

We hit dx_push again, i.e. the call recurses. Combined with what we saw when the queue was created, a queue is itself an object with a parent and a root class, so the recursion eventually reaches the root queue's implementation.

So what is do_targetq? We have to go back to the queue's creation, dispatch_queue_create, to find out.

[image: dispatch_queue_create --> _dispatch_lane_create_with_target, where do_targetq is assigned]
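The screenshots are missing, so here is a trimmed sketch of the relevant part of _dispatch_lane_create_with_target (which dispatch_queue_create calls through) and of _dispatch_get_root_queue, based on the libdispatch sources; details differ between versions, but the point is that a custom queue's do_targetq defaults to one of the global root queues.

static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
		dispatch_queue_t tq, bool legacy)
{
	// ...
	if (!tq) {
		// no explicit target queue: pick the global root queue matching the QoS/overcommit attributes
		tq = _dispatch_get_root_queue(
				qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
				overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
	}
	// ...
	dq->do_targetq = tq; // the custom queue's target is a root queue
	return _dispatch_trace_queue_create(dq)._dq;
}

static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
	// _dispatch_root_queues is a static array of root queues, indexed by QoS and the overcommit flag
	return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}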

So the queue used by dx_push inside _dispatch_continuation_redirect_push is the one returned by _dispatch_get_root_queue().

[image: _dispatch_get_root_queue source]

Similarly, find the dq_push implementation for dispatch_queue_global_t.
[image: queue_global vtable, .dq_push = _dispatch_root_queue_push]

7. Step into _dispatch_root_queue_push

[image: _dispatch_root_queue_push source]

8. Step into _dispatch_root_queue_push_inline

[image: _dispatch_root_queue_push_inline source]

9. Step into _dispatch_root_queue_poke

[image: _dispatch_root_queue_poke source]
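In place of the missing screenshots, a condensed sketch of these two intermediate steps (simplified from the libdispatch sources; error handling and platform branches omitted):

static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
		dispatch_object_t _head, dispatch_object_t _tail, int n)
{
	struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
	// enqueue the continuation; if the queue was previously empty, poke it so a worker thread is woken or created
	if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
		return _dispatch_root_queue_poke(dq, n, 0);
	}
}

void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
	if (!_dispatch_queue_class_probe(dq)) {
		return; // nothing pending on this root queue
	}
	// ...
	return _dispatch_root_queue_poke_slow(dq, n, floor);
}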

10. Step into _dispatch_root_queue_poke_slow

DISPATCH_NOINLINE
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    int remaining = n;
    int r = ENOSYS;

    _dispatch_root_queues_init(); // key point
    
    ...
    // do-while loop that creates the worker threads
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
        while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
            if (r != EAGAIN) {
                (void)dispatch_assume_zero(r);
            }
            _dispatch_temporary_resource_shortage();
        }
    } while (--remaining);
    ...
}

First, let's analyze _dispatch_root_queues_init:

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queues_init(void)
{
    dispatch_once_f(&_dispatch_root_queues_pred, NULL, _dispatch_root_queues_init_once);
}

Stepping into _dispatch_root_queues_init, we find it is a dispatch_once_f call, the same once mechanism behind singletons (not elaborated here; analyzed in detail later). The func passed in is _dispatch_root_queues_init_once.

_dispatch_root_queues_init_once is where the lower-level OS handling begins. Setting a breakpoint and printing the call stack with bt gives:

bt
* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 1.1
  * frame #0: 0x0000000100930d0e libdispatch.dylib`_dispatch_root_queues_init_once
    frame #1: 0x000000010091e9c8 libdispatch.dylib`_dispatch_client_callout + 8
    frame #2: 0x000000010091ff33 libdispatch.dylib`_dispatch_once_callout + 66
    frame #3: 0x000000010092b5c3 libdispatch.dylib`_dispatch_root_queue_poke_slow + 363
    frame #4: 0x00007fff2469bc11 UIKitCore`_UIApplicationMainPreparations + 91
    frame #5: 0x00007fff2469bb8c UIKitCore`UIApplicationMain + 73
    frame #6: 0x00000001006ad132 GCD`main(argc=1, argv=0x00007ffeef552cc8) at main.m:17:12
    frame #7: 0x00007fff2025a3e9 libdyld.dylib`start + 1

To summarize, the bottom-level behavior of dispatch_async is:

[Preparation]: the async task is copied and wrapped into a continuation, and the callback function func is set.

[Block callback]: dx_push recurses and redirects the work to a root queue; a thread is then created via pthread_create, and finally the block is executed via dx_invoke (note that dx_push and dx_invoke come in pairs).

Summary: the call flow for child-thread creation in dispatch_async:

1. dispatch_async --> _dispatch_continuation_async --> dx_push --> dq_push --> (concurrent queue) _dispatch_lane_concurrent_push --> _dispatch_continuation_redirect_push
2. _dispatch_continuation_redirect_push --> dx_push (now on the root/global queue) --> _dispatch_root_queue_push --> _dispatch_root_queue_push_inline --> _dispatch_root_queue_poke --> _dispatch_root_queue_poke_slow --> thread-pool scheduling, creating threads via pthread_create

When is the GCD block called back?

dispatch_async: inspecting the call stack

    dispatch_async(queue2, ^{
        NSLog(@"print");
    });

Set a breakpoint on NSLog(@"print") and look at the call stack.

[image: call stack showing _dispatch_worker_thread2]

Notice that the call stack contains _dispatch_worker_thread2.

When is _dispatch_worker_thread2 invoked?

So when exactly is _dispatch_worker_thread2 called?

1. First, search globally for _dispatch_worker_thread2

[image: search results for _dispatch_worker_thread2]

All the matches are inside a single function --> _dispatch_root_queues_init_once.

2. Similarly, search globally for _dispatch_root_queues_init_once

[image: search results for _dispatch_root_queues_init_once]

It is called from _dispatch_root_queues_init.

3. Similarly, search globally for _dispatch_root_queues_init

[image: search results for _dispatch_root_queues_init]

Does _dispatch_root_queue_poke_slow look familiar? It is exactly the function we passed through above while tracing thread creation. So the invocation of the task block and the creation of the child thread are now connected, and the link is _dispatch_root_queue_poke_slow.

_dispatch_root_queue_poke_slow in turn calls _dispatch_root_queues_init.

4. Look again at the implementation of _dispatch_root_queues_init:

static inline void
_dispatch_root_queues_init(void)
{
	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
			_dispatch_root_queues_init_once);
}

Does dispatch_once_f ring a bell? Could this be the singleton mechanism? The singleton we usually write looks like this:

static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
    // input your code
});

5. Search for the dispatch_once source

dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
	dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}

6. Continue into dispatch_once_f

DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;

#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
    if (likely(v == DLOCK_ONCE_DONE)) {
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    if (_dispatch_once_gate_tryenter(l)) {
        return _dispatch_once_callout(l, ctxt, func);
    }
    return _dispatch_once_wait(l);
}

[image: _dispatch_once_gate_tryenter, _dispatch_once_callout and related helpers]

In short, the _dispatch_once_gate_tryenter check guarantees that only one thread enters and executes the code. But why does it run only once? Look at _dispatch_once_mark_done inside _dispatch_once_gate_broadcast.

[image: _dispatch_once_gate_broadcast --> _dispatch_once_mark_done]
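The two functions behind those screenshots look roughly like this (simplified from the libdispatch sources):

static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
	// atomic compare-and-swap: only the first thread flips the gate from
	// DLOCK_ONCE_UNLOCKED to its own lock value, so only one thread "enters"
	return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
			(uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}

static inline uintptr_t
_dispatch_once_mark_done(dispatch_once_gate_t dgo)
{
	// atomically store DLOCK_ONCE_DONE; later callers see it and return immediately,
	// which is why the block runs exactly once
	return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
}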

7. Back to when the block is invoked

That is, when is _dispatch_worker_thread2 actually called?
In _dispatch_root_queues_init, the task executed by the once mechanism is _dispatch_root_queues_init_once.

[image: _dispatch_root_queues_init calling dispatch_once_f with _dispatch_root_queues_init_once]

8. Now look at _dispatch_root_queues_init_once

[image: _dispatch_root_queues_init_once binding the worker threads to _dispatch_worker_thread2]

As the image shows, _dispatch_root_queues_init_once is where the worker threads are bound to the entry function _dispatch_worker_thread2. Next, let's look at the rough flow of _dispatch_worker_thread2.
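That binding is, roughly, the workqueue-callback configuration inside _dispatch_root_queues_init_once. A heavily trimmed sketch follows (this is only one of several configuration branches, and the fields vary by version):

static void
_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
{
	struct pthread_workqueue_config cfg = {
		.version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
		.queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum,
		// ...
	};
	// worker threads handed out by the kernel workqueue will start in _dispatch_worker_thread2
	cfg.workq_cb = _dispatch_worker_thread2;
	int r = pthread_workqueue_setup(&cfg, sizeof(cfg));
	if (r != 0) {
		DISPATCH_INTERNAL_CRASH(r, "pthread_workqueue_setup() failed");
	}
}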

The rough flow of _dispatch_worker_thread2
1. Search directly for _dispatch_worker_thread2

_dispatch_worker_thread2(pthread_priority_t pp)
{
	bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	dispatch_queue_global_t dq;

	pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
	_dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
	dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit);

	_dispatch_introspection_thread_add();
	_dispatch_trace_runtime_event(worker_unpark, dq, 0);

	int pending = os_atomic_dec2o(dq, dgq_pending, relaxed);
	dispatch_assert(pending >= 0);
	_dispatch_root_queue_drain(dq, dq->dq_priority,
			DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN);
	_dispatch_voucher_debug("root queue clear", NULL);
	_dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK);
	_dispatch_trace_runtime_event(worker_park, NULL, 0);
}

2. _dispatch_root_queue_drain

_dispatch_root_queue_drain(dispatch_queue_global_t dq,
		dispatch_priority_t pri, dispatch_invoke_flags_t flags)
{
#if DISPATCH_DEBUG
	dispatch_queue_t cq;
	if (unlikely(cq = _dispatch_queue_get_current())) {
		DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling");
	}
#endif
	_dispatch_queue_set_current(dq);
	_dispatch_init_basepri(pri);
	_dispatch_adopt_wlh_anon();

	struct dispatch_object_s *item;
	bool reset = false;
	dispatch_invoke_context_s dic = { };
#if DISPATCH_COCOA_COMPAT
	_dispatch_last_resort_autorelease_pool_push(&dic);
#endif // DISPATCH_COCOA_COMPAT
	_dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri);
	_dispatch_perfmon_start();
	while (likely(item = _dispatch_root_queue_drain_one(dq))) {
		if (reset) _dispatch_wqthread_override_reset();
		_dispatch_continuation_pop_inline(item, &dic, flags, dq);
		reset = _dispatch_reset_basepri_override();
		if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) {
			break;
		}
	}

	// overcommit or not. worker thread
	if (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
		_dispatch_perfmon_end(perfmon_thread_worker_oc);
	} else {
		_dispatch_perfmon_end(perfmon_thread_worker_non_oc);
	}

#if DISPATCH_COCOA_COMPAT
	_dispatch_last_resort_autorelease_pool_pop(&dic);
#endif // DISPATCH_COCOA_COMPAT
	_dispatch_reset_wlh();
	_dispatch_clear_basepri();
	_dispatch_queue_set_current(NULL);
}

3. Search for _dispatch_continuation_pop_inline

_dispatch_continuation_pop_inline(dispatch_object_t dou,
		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
		dispatch_queue_class_t dqu)
{
	dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
			_dispatch_get_pthread_root_queue_observer_hooks();
	if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq);
	flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
	if (_dispatch_object_has_vtable(dou)) {
		dx_invoke(dou._dq, dic, flags);
	} else {
		_dispatch_continuation_invoke_inline(dou, flags, dqu);
	}
	if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq);
}

Finally, we arrive at dx_invoke and _dispatch_continuation_invoke_inline.

  • _dispatch_continuation_invoke_inline
    [image: _dispatch_continuation_invoke_inline source]

[image: _dispatch_client_callout, where f(ctxt) finally runs]
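In place of the missing screenshots, the tail of the chain is roughly this: _dispatch_continuation_invoke_inline hands dc_ctxt and dc_func to _dispatch_client_callout, which finally calls the function pointer with the block as its argument (simplified sketch; the real callout also handles exception unwinding):

static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou,
		dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
	dispatch_continuation_t dc = dou._dc;
	// ...
	// dc_func was set back in _dispatch_continuation_init_f, e.g. _dispatch_call_block_and_release
	_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
	// ...
}

void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
	return f(ctxt); // this is where the user's block finally runs
}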

The source now makes the block() invocation flow clear:

1. Set a breakpoint inside the block task and use the LLDB bt command to inspect the call stack; it shows _dispatch_worker_thread2.
2. Search for the callers of _dispatch_worker_thread2 and find _dispatch_root_queues_init --> _dispatch_root_queues_init_once.
3. In _dispatch_root_queues_init_once, the worker threads are set up and bound to the entry function _dispatch_worker_thread2.
4. Following the source of _dispatch_worker_thread2, we find where the block task is actually invoked.

The synchronous function: dispatch_sync

Flowchart of the dispatch_sync analysis

[image: flowchart of the dispatch_sync call path]

1. Search for dispatch_sync; it calls straight into dispatch_sync_f / _dispatch_sync_f

void dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
    // this if branch is rarely taken; read it as if (_dispatch_block_has_private_data(work))
    if (unlikely(_dispatch_block_has_private_data(work))) {
        return _dispatch_sync_block_with_private_data(dq, work, 0);
    }
    dispatch_sync_f(dq, work, _dispatch_Block_invoke(work));
}

static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
        uintptr_t dc_flags)
{
    _dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}

2. Look at the _dispatch_sync_f_inline source; dq_width == 1 means a serial queue:

static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	if (likely(dq->dq_width == 1)) { // dq_width == 1 means a serial queue
		return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
	}

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// global concurrent queues, or queues bound to a non-dispatch thread, take this branch
	if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
	}

	if (unlikely(dq->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
			_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}

In _dispatch_sync_f_inline there are two key points:

Barrier: _dispatch_barrier_sync_f (explained in the later analysis of barrier functions). This shows that the synchronous function is in fact implemented on top of the synchronous barrier function.
Deadlock: _dispatch_sync_f_slow. If two parties end up waiting for each other, a deadlock results (a minimal reproduction follows below).
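A minimal reproduction of the deadlock case (the queue label is arbitrary): a dispatch_sync that targets the serial queue already executing the current block makes the two wait on each other, and __DISPATCH_WAIT_FOR_QUEUE__ crashes with "dispatch_sync called on queue already owned by current thread".

dispatch_queue_t serialQ = dispatch_queue_create("com.demo.serial", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQ, ^{
    // this block is already running on serialQ; the inner sync cannot start
    // until this block finishes, and this block cannot finish until the sync returns
    dispatch_sync(serialQ, ^{
        NSLog(@"never reached"); // deadlock: libdispatch detects it and crashes
    });
});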

_dispatch_barrier_sync_f: the synchronous barrier function

1. Search for _dispatch_barrier_sync_f

static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}

2. Step into _dispatch_barrier_sync_f_inline

_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	
	// the barrier path can deadlock too
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

2.1 Step into _dispatch_introspection_sync_begin

_dispatch_introspection_sync_begin(dispatch_queue_class_t dq)
{
	if (!_dispatch_introspection.debug_queue_inversions) return;
	// these are all just preparation steps
	_dispatch_introspection_order_record(dq._dq);
}

3. Search for _dispatch_lane_barrier_sync_invoke_and_complete

static void
_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
		void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
	// the task function is executed here first
	_dispatch_sync_function_invoke_inline(dq, ctxt, func);
	_dispatch_trace_item_complete(dc);
	if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
		// internally this just wakes up the queue
		return _dispatch_lane_barrier_complete(dq, 0, 0);
	}

	const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
			DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
			DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
			DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
	uint64_t old_state, new_state;

	// similar to _dispatch_queue_drain_try_unlock
	// atomic CAS loop: if dq->dq_state equals old_state, store new_state into dq->dq_state;
	// otherwise reload dq->dq_state into old_state and retry.
	// for a serial queue reaching this point the two are equal, so new_state (computed in the loop body) ends up in dq->dq_state
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
		new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
		new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
		if (unlikely(old_state & fail_unlock_mask)) {
			os_atomic_rmw_loop_give_up({
				return _dispatch_lane_barrier_complete(dq, 0, 0);
			});
		}
	});
	if (_dq_state_is_base_wlh(old_state)) {
		_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
	}
}
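For completeness, _dispatch_sync_function_invoke_inline (the function executed first above) roughly does the following: it temporarily makes dq the thread's current queue, runs the task synchronously on the calling thread, then restores the previous queue (sketch based on the libdispatch sources):

static inline void
_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
		dispatch_function_t func)
{
	dispatch_thread_frame_s dtf;
	_dispatch_thread_frame_push(&dtf, dq);   // make dq the current queue for this thread
	_dispatch_client_callout(ctxt, func);    // run the task synchronously, on the calling thread
	_dispatch_perfmon_workitem_inc();
	_dispatch_thread_frame_pop(&dtf);        // restore the previous queue
}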

_dispatch_sync_f_slow: the deadlock path

1. Step into _dispatch_sync_f_slow; at this point the current (main) queue is suspended and blocked.

_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
		dispatch_function_t func, uintptr_t top_dc_flags,
		dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
	dispatch_queue_t top_dq = top_dqu._dq;
	dispatch_queue_t dq = dqu._dq;
	if (unlikely(!dq->do_targetq)) {
		return _dispatch_sync_function_invoke(dq, ctxt, func);
	}

	pthread_priority_t pp = _dispatch_get_priority();
	struct dispatch_sync_context_s dsc = {
		.dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
		.dc_func     = _dispatch_async_and_wait_invoke,
		.dc_ctxt     = &dsc,
		.dc_other    = top_dq,
		.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
		.dc_voucher  = _voucher_get(),
		.dsc_func    = func,
		.dsc_ctxt    = ctxt,
		.dsc_waiter  = _dispatch_tid_self(),
	};
    // the task is pushed onto the (main) queue here; step into _dispatch_trace_item_push
	_dispatch_trace_item_push(top_dq, &dsc);
	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

	if (dsc.dsc_func == NULL) {
		dispatch_queue_t stop_dq = dsc.dc_other;
		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
	}

	_dispatch_introspection_sync_begin(top_dq);
	_dispatch_trace_item_pop(top_dq, &dsc);
	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
			DISPATCH_TRACE_ARG(&dsc));
}
  • Adding a task to a queue pushes it onto that queue; step into _dispatch_trace_item_push
static inline void
_dispatch_trace_item_push(dispatch_queue_class_t dqu, dispatch_object_t _tail)
{
    if (unlikely(DISPATCH_QUEUE_PUSH_ENABLED())) {
        _dispatch_trace_continuation(dqu._dq, _tail._do, DISPATCH_QUEUE_PUSH);
    }

    _dispatch_trace_item_push_inline(dqu._dq, _tail._do);
    _dispatch_introspection_queue_push(dqu, _tail);
}
  • Step into __DISPATCH_WAIT_FOR_QUEUE__: it checks whether dq is the queue currently being waited on, obtains its state, and then matches that state against the queue the current task depends on
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
// check whether dq is the queue being waited on and obtain its state
    uint64_t dq_state = _dispatch_wait_prepare(dq);
    if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
        DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                "dispatch_sync called on queue "
                "already owned by current thread");
    }
   // ... omitted ...
}

Step into the source of _dq_state_drain_locked_by -> _dispatch_lock_is_locked_by:

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
    // equivalent to _dispatch_lock_owner(lock_value) == tid
    // XOR: identical bits give 0, so if lock_value and tid share the same owner bits the result masked by DLOCK_OWNER_MASK is 0
    // i.e. check whether the task about to wait and the task currently executing are on the same thread; put simply, whether the waiter and the executor are on the same queue
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

If the queue being waited on and the queue currently executing are the same (i.e. the thread IDs compare equal), a deadlock results.
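A tiny standalone demonstration of that XOR check (the DLOCK_OWNER_MASK value here is only illustrative; in libdispatch it masks off the lock's low flag bits):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DLOCK_OWNER_MASK ((uint32_t)0xfffffffc) // illustrative: ignore the low flag bits

static bool is_locked_by(uint32_t lock_value, uint32_t tid)
{
    // identical owner bits XOR to 0, and 0 & mask == 0: same owner, so a sync wait would deadlock
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

int main(void)
{
    uint32_t current_tid = 0x1044; // pretend thread id
    printf("%d\n", is_locked_by(0x1044, current_tid)); // 1: same thread owns the lock -> deadlock
    printf("%d\n", is_locked_by(0x2088, current_tid)); // 0: a different thread owns it -> just wait
    return 0;
}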

dispatch_sync summary

  • The synchronous function is implemented on top of the synchronous barrier function.
  • If the queue currently executing and the queue being waited on are the same, they wait on each other and a deadlock occurs.