Dissecting libDispatch's Twisted Internals: How Async Tasks on a Global Queue Are Handled


The call under analysis is:

    dispatch_async_f(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), reinterpret_cast<void *>(item), _WorkItemRunner);
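To keep that call concrete, here is a minimal, self-contained usage sketch. The work_item_t struct and the body of _WorkItemRunner are hypothetical stand-ins (the original caller's definitions are not shown in this article); it assumes a platform where <dispatch/dispatch.h> is available.

#include <dispatch/dispatch.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical work item; the real caller's type is not shown in this article. */
typedef struct work_item_s {
	int id;
} work_item_t;

/* Hypothetical runner: libdispatch calls this on a worker thread. */
static void _WorkItemRunner(void *ctxt)
{
	work_item_t *item = ctxt;
	printf("running work item %d on a global-queue worker\n", item->id);
	free(item);
}

int main(void)
{
	work_item_t *item = calloc(1, sizeof(*item));
	item->id = 42;
	dispatch_async_f(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
	                 item, _WorkItemRunner);
	sleep(1); /* crude: give the worker thread time to run before main exits */
	return 0;
}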

1. A quick review of the global queue

We take the default priority as the example.

The queue structure:

    {  
        .do_vtable = &_dispatch_queue_root_vtable,  
        .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,  
        .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,  
        .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,  
        .do_ctxt = &_dispatch_root_queue_contexts[2],  
      
        .dq_label = "com.apple.root.default-priority",  
        .dq_running = 2,  
        .dq_width = UINT32_MAX,//# define UINT32_MAX        (4294967295U)  
        .dq_serialnum = 6,  
    },  

The vtable structure:

static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = {
	.do_type = DISPATCH_QUEUE_GLOBAL_TYPE,
	.do_kind = "global-queue",
	.do_debug = dispatch_queue_debug,
	.do_probe = _dispatch_queue_wakeup_global,
};

The do_ctxt structure:

	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[2],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},

The mediator referenced by do_ctxt:

	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},

The mediator's vtable:

const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = {
	.do_type = DISPATCH_SEMAPHORE_TYPE,
	.do_kind = "semaphore",
	.do_dispose = _dispatch_semaphore_dispose,
	.do_debug = _dispatch_semaphore_debug,
};

2. The async entry point: dispatch_async_f

2.1. dispatch_continuation_t

// If dc_vtable is less than 127, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// first two words must align with normal objects.
#define DISPATCH_CONTINUATION_HEADER(x)    \
    const void *                do_vtable;    \
    struct x *volatile    do_next;    \
    dispatch_function_t    dc_func;    \
    void *                        dc_ctxt

struct dispatch_continuation_s {
	DISPATCH_CONTINUATION_HEADER(dispatch_continuation_s);
	dispatch_group_t	dc_group;
	void *				dc_data[3];
};

libdispatch wraps an async task into a dispatch_continuation_s.

The comment above is important:

// If dc_vtable is less than 127, then the object is a continuation.
// Otherwise, the object has a private layout and memory management rules. The
// first two words must align with normal objects.

dc_vtable refers to the do_vtable field of dispatch_continuation_s.

If do_vtable is less than 127, the object is a continuation. Otherwise the object has a private layout, meaning it may be a queue or some other structure; such private layouts must keep their first two pointer-sized words (do_vtable and do_next) at the same offsets as a normal object.

Later, when queues and tasks are scheduled, we will see do_vtable used to decide whether the object being dispatched is a task or a queue.


2.2. dispatch_async_f

void
dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly());

	// unlike dispatch_sync_f(), we do NOT need to check the queue width,
	// the "drain" function will do this test

	if (!dc) {
		return _dispatch_async_f_slow(dq, ctxt, func);
	}

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dc->dc_func = func;
	dc->dc_ctxt = ctxt;

	_dispatch_queue_push(dq, dc);
}
static inline dispatch_continuation_t
_dispatch_continuation_alloc_cacheonly(void)
{
    dispatch_continuation_t dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
    if (dc) {
        _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
    }
    return dc;
}

First, _dispatch_continuation_alloc_cacheonly checks whether the current thread has a cached dc. If it does, the dc is filled in and pushed onto dq. Assume this is our first time through and there is no cache, so we fall into the slow path (a sketch of this per-thread cache follows the snippet below):

	if (!dc) {
		return _dispatch_async_f_slow(dq, ctxt, func);
	}
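The per-thread cache is nothing more than a singly linked free list hung off a thread-specific key. Here is a minimal sketch of that pattern, assuming plain pthread TSD in place of libdispatch's _dispatch_thread_getspecific/_dispatch_thread_setspecific wrappers (node and cache_key are illustrative names):

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;    /* plays the role of do_next */
	/* ... payload ... */
};

static pthread_key_t cache_key;   /* assume pthread_key_create ran elsewhere */

/* _dispatch_continuation_alloc_cacheonly analogue: pop the cached head, if any. */
static struct node *cache_pop(void)
{
	struct node *n = pthread_getspecific(cache_key);
	if (n) {
		pthread_setspecific(cache_key, n->next);  /* unlink the head */
	}
	return n;
}

/* _dispatch_continuation_free analogue (see 2.11): push back for reuse. */
static void cache_push(struct node *n)
{
	n->next = pthread_getspecific(cache_key);     /* old head becomes next */
	pthread_setspecific(cache_key, n);            /* new head is n */
}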

2.3. _dispatch_async_f_slow

static void
_dispatch_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap());

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dc->dc_func = func;
	dc->dc_ctxt = context;

	_dispatch_queue_push(dq, dc);
}
dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void)
{
    static dispatch_once_t pred;
    dispatch_continuation_t dc;

    dispatch_once_f(&pred, NULL, _dispatch_ccache_init);

    while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
        sleep(1);
    }

    return dc;
}

(a). Allocate a dc from the heap via _dispatch_continuation_alloc_from_heap.

(b). Fill in the dc; note that its do_vtable is set to DISPATCH_OBJ_ASYNC_BIT:

#define DISPATCH_OBJ_ASYNC_BIT    0x1

(c). _dispatch_queue_push pushes the dc onto dq.


2.4. _dispatch_queue_push

#define _dispatch_queue_push(x, y) _dispatch_queue_push_list((x), (y), (y))
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, dispatch_object_t _tail)
{
	struct dispatch_object_s *prev, *head = _head._do, *tail = _tail._do;

	tail->do_next = NULL;
	prev = fastpath(dispatch_atomic_xchg(&dq->dq_items_tail, tail));
	if (prev) {
		// if we crash here with a value less than 0x1000, then we are at a known bug in client code
		// for example, see _dispatch_queue_dispose or _dispatch_atfork_child
		prev->do_next = head;
	} else {
		_dispatch_queue_push_list_slow(dq, head);
	}
}


We enter _dispatch_queue_push_list; in this call _head and _tail are both the same dc.

tail->do_next is set to NULL.

Then:

	prev = fastpath(dispatch_atomic_xchg(&dq->dq_items_tail, tail));

This one statement does two things:

(a). It atomically swaps dc into dq->dq_items_tail, i.e. tail becomes the new dq->dq_items_tail.

(b). It returns the previous value of dq->dq_items_tail as prev.

If prev != NULL, the queue already held at least one item; prev->do_next = head links the new item behind the old tail, and we return.

Otherwise the queue was empty, and we fall into _dispatch_queue_push_list_slow(dq, head). A portable sketch of this enqueue pattern follows.
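This push is the classic lock-free multi-producer enqueue: swap the tail first, then link. Below is a minimal sketch in portable C11 atomics (not libdispatch's own dispatch_atomic_* macros), with illustrative names:

#include <stdatomic.h>
#include <stddef.h>

struct item {
	struct item *_Atomic do_next;
};

struct queue {
	struct item *_Atomic items_tail;
	struct item *_Atomic items_head;
};

static void queue_push(struct queue *q, struct item *it)
{
	atomic_store(&it->do_next, NULL);
	/* Atomically make `it` the new tail and fetch the previous tail. */
	struct item *prev = atomic_exchange(&q->items_tail, it);
	if (prev) {
		/* Queue was non-empty: link the new item behind the old tail.
		 * Until this store lands, a concurrent drainer sees a "gap". */
		atomic_store(&prev->do_next, it);
	} else {
		/* Queue was empty: publish the head, then wake a worker
		 * (the _dispatch_queue_push_list_slow path in libdispatch). */
		atomic_store(&q->items_head, it);
	}
}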

2.5. _dispatch_queue_push_list_slow

DISPATCH_NOINLINE 
void
_dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj)
{
	// The queue must be retained before dq_items_head is written in order
	// to ensure that the reference is still valid when _dispatch_wakeup is
	// called. Otherwise, if preempted between the assignment to
	// dq_items_head and _dispatch_wakeup, the blocks submitted to the
	// queue may release the last reference to the queue when invoked by
	// _dispatch_queue_drain. <rdar://problem/6932776>
	_dispatch_retain(dq);
	dq->dq_items_head = obj;
	_dispatch_wakeup(dq);
	_dispatch_release(dq);
}

(1). Retain and release

Here _dispatch_retain(dq) / _dispatch_release(dq) appear again. The comment in the source explains why: the queue must be retained before dq_items_head is written, otherwise a block invoked by _dispatch_queue_drain could drop the last reference to the queue between the store to dq_items_head and the call to _dispatch_wakeup:

void
_dispatch_retain(dispatch_object_t dou)
{
	struct dispatch_object_s *obj = DO_CAST(dou);

	if (obj->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
		return; // global object
	}
	if ((dispatch_atomic_inc(&obj->do_ref_cnt) - 1) == 0) {
		DISPATCH_CLIENT_CRASH("Resurrection of an object");
	}
}

For a global queue, do_ref_cnt is DISPATCH_OBJECT_GLOBAL_REFCNT, so _dispatch_retain effectively does nothing here.


(2). Insert the node

dq->dq_items_head = obj;

(3). Wake up the queue

	_dispatch_wakeup(dq);

2.6. _dispatch_wakeup

dispatch_queue_t
_dispatch_wakeup(dispatch_object_t dou)
{
	dispatch_queue_t tq;

	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		return NULL;
	}
	if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) {
		return NULL;
	}

	if (!_dispatch_trylock(dou._do)) {
		return NULL;
	}
	_dispatch_retain(dou._do);
	tq = dou._do->do_targetq;
	_dispatch_queue_push(tq, dou._do);
	return tq;	// libdispatch doesn't need this, but the Instrument DTrace probe does
}

(1). Check whether the object is suspended. As explained earlier, a global queue is not, so we move on:

	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		return NULL;
	}

(2). 
	if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) {
		return NULL;
	}

dx_probe invokes the queue's do_probe, which for a global queue is _dispatch_queue_wakeup_global.

If _dispatch_queue_wakeup_global returns false and dq is empty, we return right away.

We will step into _dispatch_queue_wakeup_global in a moment.

Let's leave a label here and come back to it once _dispatch_queue_wakeup_global has been analyzed:

Label A: step (3)


2.7. _dispatch_queue_wakeup_global

bool
_dispatch_queue_wakeup_global(dispatch_queue_t dq)
{
	static dispatch_once_t pred;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
	pthread_t pthr;
	int r, t_count;

	if (!dq->dq_items_tail) {
		return false;
	}

	_dispatch_safe_fork = false;

	dispatch_debug_queue(dq, __PRETTY_FUNCTION__);

	dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);

	if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
		goto out;
	}

	do {
		t_count = qc->dgq_thread_pool_size;
		if (!t_count) {
			_dispatch_debug("The thread pool is full: %p", dq);
			goto out;
		}
	} while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1));

	while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) {
		if (r != EAGAIN) {
			(void)dispatch_assume_zero(r);
		}
		sleep(1);
	}
	r = pthread_detach(pthr);
	(void)dispatch_assume_zero(r);

out:
	return false;
}

(1). dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);

This guarantees that _dispatch_root_queues_init runs exactly once, i.e. one-time initialization.
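As a quick reminder of the dispatch_once_f contract (the initializer runs exactly once no matter how many threads race through the call), here is a tiny usage example with made-up names:

#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_once_t init_pred;   /* must have static storage duration */

static void init_once(void *ctxt)
{
	printf("one-time init, ctxt=%p\n", ctxt);
}

static void do_work(void)
{
	/* Safe to call from any thread, any number of times. */
	dispatch_once_f(&init_pred, NULL, init_once);
	/* ... rest of the work ... */
}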

static void
_dispatch_root_queues_init(void *context __attribute__((unused)))
{
#if USE_POSIX_SEM
	int ret;
#endif
	int i;

	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
#if USE_POSIX_SEM
		/* XXXRW: POSIX semaphores don't support LIFO? */
		ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0);
		(void)dispatch_assume_zero(ret);
#endif
	}
}

_dispatch_root_queues_init simply initializes every _dispatch_thread_mediator[i].dsema_sem with an initial value of 0.

We are about to need them; for how the underlying semaphore works, see the companion article on bionic semaphores.


(2). The signal check


	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
	if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
		goto out;
	}

And this qc is _dispatch_root_queue_contexts[2], of type:

struct dispatch_root_queue_context_s {
	uint32_t dgq_pending;
	uint32_t dgq_thread_pool_size;
	dispatch_semaphore_t dgq_thread_mediator;
};
    {
        .dgq_thread_mediator = &_dispatch_thread_mediator[2],
        .dgq_thread_pool_size = MAX_THREAD_COUNT,
    },

and its dgq_thread_mediator points at _dispatch_thread_mediator[2]:

    {
        .do_vtable = &_dispatch_semaphore_vtable,
        .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
        .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
    },

Now step into dispatch_semaphore_signal:

long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	if (dispatch_atomic_inc(&dsema->dsema_value) > 0) {
		return 0;
	}
	return _dispatch_semaphore_signal_slow(dsema);
}

dispatch_atomic_inc(&dsema->dsema_value) was explained in the companion article on libDispatch semaphores:

If the incremented value is greater than zero, there is no contention on the semaphore (no waiter is blocked), so the function just returns 0.

Otherwise a waiter is blocked on this resource and must be signaled, so we enter _dispatch_semaphore_signal_slow to wake one waiter; this case will come up again later.

On the path we are following, it returns 0 here. (A C11 sketch of this fast path follows.)
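The same fast-path idea, sketched with C11 atomics and a POSIX semaphore standing in for the slow-path wakeup (field names are illustrative, not libdispatch's exact layout):

#include <stdatomic.h>
#include <semaphore.h>

struct my_sema {
	_Atomic long value;   /* > 0: free resources; < 0: |value| waiters parked */
	sem_t       sem;      /* kernel object used only on the slow path */
};

static long my_semaphore_signal(struct my_sema *s)
{
	/* atomic_fetch_add returns the old value, so +1 gives the new one. */
	if (atomic_fetch_add(&s->value, 1) + 1 > 0) {
		return 0;          /* nobody was waiting: pure userspace fast path */
	}
	sem_post(&s->sem);     /* slow path: wake exactly one parked waiter */
	return 1;
}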

(3). Shrink the thread-pool count

	do {
		t_count = qc->dgq_thread_pool_size;
		if (!t_count) {
			_dispatch_debug("The thread pool is full: %p", dq);
			goto out;
		}
	} while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1));

This decrements dgq_thread_pool_size by one, reserving a slot for the thread we are about to create (see the sketch below).
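The same "reserve one slot, but never go below zero" pattern in C11 form, for readers who find the compare-and-swap loop hard to parse (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool reserve_thread_slot(_Atomic uint32_t *pool_size)
{
	uint32_t t_count = atomic_load(pool_size);
	do {
		if (t_count == 0) {
			return false;   /* pool exhausted: do not create a thread */
		}
		/* On failure, compare_exchange reloads t_count with the current value. */
	} while (!atomic_compare_exchange_weak(pool_size, &t_count, t_count - 1));
	return true;            /* we own one slot; go ahead and pthread_create */
}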
(4). Create the thread

	while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) {
		if (r != EAGAIN) {
			(void)dispatch_assume_zero(r);
		}
		sleep(1);
	}
	r = pthread_detach(pthr);
	(void)dispatch_assume_zero(r);

A worker thread is created via pthread_create, with _dispatch_worker_thread as its entry point, and then detached.

2.8. _dispatch_worker_thread


This is the first function the newly created thread runs; keep in mind that from here on we are on the new thread.

void *
_dispatch_worker_thread(void *context)
{
	dispatch_queue_t dq = context;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
	sigset_t mask;
	int r;

	// workaround tweaks the kernel workqueue does for us
	r = sigfillset(&mask);
	(void)dispatch_assume_zero(r);
	r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL);
	(void)dispatch_assume_zero(r);

	do {
		_dispatch_worker_thread2(context);
		// we use 1 second to prevent the cpu usage is always high.
	} while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 1ull * NSEC_PER_SEC)) == 0);

	dispatch_atomic_inc(&qc->dgq_thread_pool_size);
	if (dq->dq_items_tail) {
		_dispatch_queue_wakeup_global(dq);
	}

	return NULL;
}



The new thread enters _dispatch_worker_thread and spins in this loop:

	do {
		_dispatch_worker_thread2(context);
		// we use 1 second to prevent the cpu usage is always high.
	} while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 1ull * NSEC_PER_SEC)) == 0);

(a). _dispatch_worker_thread2(context) is described in detail below; it drains dq, either executing the tasks it pops or re-dispatching items that are themselves queues (remember that an object in the queue may itself be a dq).

(b). Wait on the semaphore.

There is a small trap here: the call passes dispatch_time(0, 1ull * NSEC_PER_SEC):

dispatch_time_t
dispatch_time(dispatch_time_t inval, int64_t delta)
{
	if (inval == DISPATCH_TIME_FOREVER) {
		return DISPATCH_TIME_FOREVER;
	}
	if ((int64_t)inval < 0) {
		// wall clock
		if (delta >= 0) {
			if ((int64_t)(inval -= delta) >= 0) {
				return DISPATCH_TIME_FOREVER;      // overflow
			}
			return inval;
		}
		if ((int64_t)(inval -= delta) >= -1) {
			// -1 is special == DISPATCH_TIME_FOREVER == forever
			return -2;      // underflow
		}
		return inval;
	}
	// mach clock
	delta = _dispatch_time_nano2mach(delta);
	if (inval == 0) {
		inval = _dispatch_absolute_time(); // inval becomes the current absolute time
	}
	if (delta >= 0) {
		if ((int64_t)(inval += delta) <= 0) { // delta is added onto the absolute time here
			return DISPATCH_TIME_FOREVER;      // overflow
		}
		return inval; // so what comes back is an absolute deadline, not a relative timeout
	}
	}
	if ((int64_t)(inval += delta) < 1) {
		return 1;       // underflow
	}
	return inval;
}

This pairs with the signal check in 2.7 (2): every time work is pushed, _dispatch_queue_wakeup_global signals the mediator first:

	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
	if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
		goto out;
	}

Back to point (b).

After a drain finishes, the worker waits on the mediator. Signal/wait pairs like this appear all over GCD; here they exist to keep the number of threads in check. Once a thread has been created, we do not want to spin up a brand-new thread every time a task lands on the queue, especially when scheduling is frequent. So when a worker finishes draining, it lingers briefly to see whether more work arrives on the same queue: if something shows up before the timeout, the loop goes around and the same thread drains it; otherwise the wait times out, the loop exits, and the thread gives its slot back by incrementing the pool size:

	dispatch_atomic_inc(&qc->dgq_thread_pool_size); // give the slot back: pool_size + 1
	if (dq->dq_items_tail) {
		_dispatch_queue_wakeup_global(dq); // new work arrived in the meantime: wake the queue again
	}
	// ...and then the thread exits
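A generic sketch of that keep-alive/retire idea, using a mutex and condition variable instead of libdispatch's semaphore (all names here are illustrative, not libdispatch's):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  work_available;
	int             pending;      /* queued "work arrived" signals */
	int             idle_slots;   /* free slots in the thread pool */
};

/* Returns true if more work arrived within one second (keep draining),
 * false if the wait timed out (retire this worker and free its slot). */
static bool linger_for_work(struct pool *p)
{
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;                             /* linger for one second */

	pthread_mutex_lock(&p->lock);
	while (p->pending == 0) {
		int rc = pthread_cond_timedwait(&p->work_available, &p->lock, &deadline);
		if (rc == ETIMEDOUT) {
			p->idle_slots++;                          /* give the slot back */
			pthread_mutex_unlock(&p->lock);
			return false;                             /* thread retires */
		}
	}
	p->pending--;
	pthread_mutex_unlock(&p->lock);
	return true;                                      /* loop around and drain again */
}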

2.9. How the new thread drains the queue

How does the new thread schedule the items in the queue? _dispatch_worker_thread2(context):

void
_dispatch_worker_thread2(void *context)
{
	struct dispatch_object_s *item;
	dispatch_queue_t dq = context;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;

	if (_dispatch_thread_getspecific(dispatch_queue_key)) {
		DISPATCH_CRASH("Premature thread recycling");
	}

	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	qc->dgq_pending = 0;

	while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
		_dispatch_continuation_pop(item);
	}

	_dispatch_thread_setspecific(dispatch_queue_key, NULL);

	_dispatch_force_cache_cleanup();
}

(1). Set dq as the new thread's thread-specific data

	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	qc->dgq_pending = 0;

(2). Drain items in a loop

	while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { // pop one item
		_dispatch_continuation_pop(item); // dispatch it, then recycle the continuation
	}

(3). Clear the thread-specific data

_dispatch_thread_setspecific(dispatch_queue_key, NULL);

The interesting parts are popping an item and dispatching it; they are covered separately in 2.10 and 2.11.


2.10. Popping an item: _dispatch_queue_concurrent_drain_one

struct dispatch_object_s *
_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq)
{
	struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul;

	// The mediator value acts both as a "lock" and a signal
	head = dispatch_atomic_xchg(&dq->dq_items_head, mediator); // atomically swap out the head
(state a)
	if (slowpath(head == NULL)) {
		// The first xchg on the tail will tell the enqueueing thread that it
		// is safe to blindly write out to the head pointer. A cmpxchg honors
		// the algorithm.
		(void)dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL);
		_dispatch_debug("no work on global work queue");
		return NULL;
	}
(state b)
	if (slowpath(head == mediator)) {
		// This thread lost the race for ownership of the queue.
		//
		// The ratio of work to libdispatch overhead must be bad. This
		// scenario implies that there are too many threads in the pool.
		// Create a new pending thread and then exit this thread.
		// The kernel will grant a new thread when the load subsides.
		_dispatch_debug("Contention on queue: %p", dq);
		_dispatch_queue_wakeup_global(dq);
		return NULL;
	}

	// Restore the head pointer to a sane value before returning.
	// If 'next' is NULL, then this item _might_ be the last item.
	next = fastpath(head->do_next);

	if (slowpath(!next)) {
		dq->dq_items_head = NULL;
(state c)		
		if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) {
			// both head and tail are NULL now
			goto out;
		}

(state d)
		// There must be a next item now. This thread won't wait long.
		while (!(next = head->do_next)) {
			_dispatch_hardware_pause();
		}
	}
(state e)
	dq->dq_items_head = next;
	_dispatch_queue_wakeup_global(dq);
out:
	return head;
}

(a)(b). In state a the head was NULL, so the queue is empty; the cmpxchg tries to put NULL back in place of the mediator and we return. In state b the head was already the mediator, meaning another thread won the race to drain this queue; we poke _dispatch_queue_wakeup_global once more and return NULL.

(c). We have head, but head->do_next is NULL, so head might be the last item:

dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)

This compare-and-swap means: if dq_items_tail still equals head, the queue contained exactly that one item, so the tail is cleared to NULL, it returns true, and we goto out; if it fails, there is at least one more item (or one is being enqueued right now), and it returns false.


(d). Reaching state d means a second item definitely exists; the producer simply has not linked it onto head->do_next yet:

		while (!(next = head->do_next)) {
			_dispatch_hardware_pause();
		}

The comment promises the thread won't wait long, and the reason lies in _dispatch_queue_push_list: the producer has already exchanged its new item into dq_items_tail (which is exactly why our cmpxchg failed), and the only step left on its side is the single store prev->do_next = head, so this spin lasts at most a few instructions. A sketch of this race is shown below.
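To make the race concrete, here is the consumer-side counterpart of the C11 enqueue sketch from 2.4, covering just the "last item or not?" decision (same illustrative types as in that sketch):

#include <stdatomic.h>
#include <stddef.h>

/* Same illustrative types as the enqueue sketch in 2.4. */
struct item  { struct item *_Atomic do_next; };
struct queue { struct item *_Atomic items_tail; struct item *_Atomic items_head; };

static struct item *take_next_or_finish(struct queue *q, struct item *head)
{
	struct item *expected = head;
	/* If the tail is still `head`, the queue held exactly one item: clear it. */
	if (atomic_compare_exchange_strong(&q->items_tail, &expected, NULL)) {
		return NULL;        /* head was the last item; queue is now empty */
	}
	/* CAS failed: a producer already exchanged in a new tail, so it is
	 * somewhere between its tail swap and the store to head->do_next.
	 * That window is a couple of instructions, hence the bounded spin. */
	struct item *next;
	while ((next = atomic_load(&head->do_next)) == NULL) {
		/* a _dispatch_hardware_pause() equivalent would go here */
	}
	return next;
}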


(e). Update the queue head to next, then wake the queue again. This wakeup runs on the current worker thread; it may create yet another worker (so several threads can drain the same global queue concurrently) before returning.

	dq->dq_items_head = next;
	_dispatch_queue_wakeup_global(dq);

(f). Return head.



2.11. Dispatching the item

_dispatch_continuation_pop(item);

I say "dispatching" rather than "executing" because the object popped from dq may be a task, but it may also be something else; after all, dispatch_object_t is just a union, playing roughly the role a base-class pointer plays in C++.

static inline void
_dispatch_continuation_pop(dispatch_object_t dou)
{
	dispatch_continuation_t dc = dou._dc;
	dispatch_group_t dg;

	if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
		return _dispatch_queue_invoke(dou._dq);
	}

	// Add the item back to the cache before calling the function. This
	// allows the 'hot' continuation to be used for a quick callback.
	//
	// The ccache version is per-thread.
	// Therefore, the object has not been reused yet.
	// This generates better assembly.
	if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
		_dispatch_continuation_free(dc);
	}
	if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
		dg = dc->dc_group;
	} else {
		dg = NULL;
	}
	dc->dc_func(dc->dc_ctxt);
	if (dg) {
		dispatch_group_leave(dg);
		_dispatch_release(dg);
	}
}

(1). Determine the object's type

	if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
		return _dispatch_queue_invoke(dou._dq);
	}

#define DISPATCH_OBJ_IS_VTABLE(x)	((unsigned long)(x)->do_vtable > 127ul)

When introducing libdispatch's basic structures I mentioned the vtable: the value stored in do_vtable tells us what kind of structure we are looking at.

If dou is a task, do_vtable holds a small tag value (well below 127), as we saw when the dc was wrapped in section 2.2:

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;

If dou is a queue, do_vtable points at real memory: a table holding the required function pointers.

So when DISPATCH_OBJ_IS_VTABLE(x) reports that dou is a queue, we enter _dispatch_queue_invoke(dou._dq) and schedule that queue instead.

If dou is a task, which is the flow analyzed in this article, we go on to step (2). A tiny worked example of the tag test is shown below.
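A small worked example of that tag test (fake_obj and some_static_vtable are made-up stand-ins; the two macros are the ones quoted in this article):

#include <assert.h>

#define DISPATCH_OBJ_ASYNC_BIT    0x1
#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul)

struct fake_obj { const void *do_vtable; };

static const int some_static_vtable = 0;   /* stands in for a real static vtable */

int main(void)
{
	struct fake_obj cont  = { .do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT };
	struct fake_obj queue = { .do_vtable = &some_static_vtable };

	assert(!DISPATCH_OBJ_IS_VTABLE(&cont));   /* tag value 0x1 < 127: a continuation */
	assert( DISPATCH_OBJ_IS_VTABLE(&queue));  /* real address  > 127: a queue-like object */
	return 0;
}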

(2). Execute the task

	if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
		_dispatch_continuation_free(dc);
	}
	if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
		dg = dc->dc_group;
	} else {
		dg = NULL;
	}
	dc->dc_func(dc->dc_ctxt);
	if (dg) {
		dispatch_group_leave(dg);
		_dispatch_release(dg);
	}

The remaining checks use the tag-bit macros: DISPATCH_OBJ_ASYNC_BIT means the continuation can be returned to the per-thread cache right away (before the callback runs, as the source comment explains, so the "hot" continuation can be reused quickly), and DISPATCH_OBJ_GROUP_BIT means it belongs to a dispatch group.

Then the task itself runs:

dc->dc_func(dc->dc_ctxt);

For completeness, here is _dispatch_continuation_free:
static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = _dispatch_thread_getspecific(dispatch_cache_key);
	dc->do_next = prev_dc;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
}
It reads the thread's dispatch_cache_key value (NULL on our first pass here), hangs it off do_next, and then installs dc as the new value of dispatch_cache_key. This is the push half of the per-thread cache we sketched back in section 2.2.

And that is GCD's async path: the complete code flow from enqueuing a task on a global queue to executing it asynchronously.




