iOS底层原理篇(十一)----GCD源码分析(一)

同步函数 dispatch_sync
  • 先来看一张图:
    死锁分析

  • 上面是死锁现象的分析,我们下面来看看底层的代码实现:

      // dispatch_sync entry point: synchronously runs `work` on queue `dq`.
      void
      dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
      {
      	uintptr_t dc_flags = DC_FLAG_BLOCK;
      	// Blocks carrying private data take a dedicated slow path.
      	if (unlikely(_dispatch_block_has_private_data(work))) {
      		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
      	}
      	// Hand the block's invoke pointer to the function-based path.
      	_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
      }
      
      // Thin wrapper that forwards straight to the inlined implementation.
      static void
      _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
      uintptr_t dc_flags)
      {
      	_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
      }
    
      static inline void
      _dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
      {
      	// Serial queues come through here: dq_width == 1
      	if (likely(dq->dq_width == 1)) {
      		return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
      	}
      	// The concurrent-queue code path is omitted......
      }
      
      // Thin wrapper for the serial (barrier) sync path.
      static void
      _dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
      {
      	_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
      }
    
      static inline void
      _dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
      {
      	// Get the thread id; underneath, _dispatch_tid_self is also
      	// implemented as a key/value (thread-local) lookup.
      	dispatch_tid tid = _dispatch_tid_self();

      	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
      		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
      	}

      	dispatch_lane_t dl = upcast(dq)._dl;
      	// Deadlock:
      	// this is where the deadlock check happens, using the tid obtained above
      	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
      		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
      		DC_FLAG_BARRIER | dc_flags);
      	}

      	if (unlikely(dl->do_targetq->do_targetq)) {
      		return _dispatch_sync_recurse(dl, ctxt, func,
      		DC_FLAG_BARRIER | dc_flags);
      	}
      	// If this is not a deadlock, execution proceeds with the call below
      	_dispatch_introspection_sync_begin(dl);
      	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
      	DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
      			dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
      }
    
      // The condition that decides whether this is a deadlock; returns bool:
      // dq:  the queue passed in
      // tid: the thread id passed in
      static inline bool
      _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid)
      {
      	return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0);
      }
      
      static inline bool
      _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, uint32_t tid, uint64_t suspend_count) 
      {
      	uint64_t init  = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
      	// derive the lock value from the tid
      	uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | _dispatch_lock_value_from_tid(tid) | (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
      	uint64_t old_state, new_state;
      	// atomic read-modify-write loop over dq_state, from the OS layer
      	return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
      		uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
      		if (old_state != (init | role)) {
      			os_atomic_rmw_loop_give_up(break);
      		}
      		new_state = value | role;
      	});
      }
      // If a deadlock occurred, the low-level check holds and the if body runs:
      if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
      		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
      		DC_FLAG_BARRIER | dc_flags);
      }
    
      // Remember the call stack in the deadlock screenshot above?
      // This is the method the runtime calls at the bottom!!!!
      static void
      _dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
      dispatch_function_t func, uintptr_t top_dc_flags,
      dispatch_queue_class_t dqu, uintptr_t dc_flags)
      {
      	dispatch_queue_t top_dq = top_dqu._dq;
      	dispatch_queue_t dq = dqu._dq;
      	
      	if (unlikely(!dq->do_targetq)) {
      		return _dispatch_sync_function_invoke(dq, ctxt, func);
      	}

      	pthread_priority_t pp = _dispatch_get_priority();
      	struct dispatch_sync_context_s dsc = {
      		.dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
      		.dc_func     = _dispatch_async_and_wait_invoke,
      		.dc_ctxt     = &dsc,
      		.dc_other    = top_dq,
      		.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
      		.dc_voucher  = _voucher_get(),
      		.dsc_func    = func,
      		.dsc_ctxt    = ctxt,
      		.dsc_waiter  = _dispatch_tid_self(),
      	};
      	
      	// push the item onto the trace stack
      	_dispatch_trace_item_push(top_dq, &dsc);
      	// Core of the deadlock: the last method called before the crash;
      	// the stack trace shows __DISPATCH_WAIT_FOR_QUEUE__
      	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

      	if (dsc.dsc_func == NULL) {
      		dispatch_queue_t stop_dq = dsc.dc_other;
      		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
      	}

      	_dispatch_introspection_sync_begin(top_dq);
      	_dispatch_trace_item_pop(top_dq, &dsc);
      	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
      	DISPATCH_TRACE_ARG(&dsc));
      }
    
      // Parks the caller until dq is drained; first checks for deadlock.
      // Fix: the bare "....省略...." elision marker was not valid C — it is now
      // a proper comment, and the Chinese comments are translated.
      static void
      __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
      {
      	// fetch the wait state of the queue
      	uint64_t dq_state = _dispatch_wait_prepare(dq);
      	// the deadlock crash: the queue's drain lock is already held by the
      	// very thread that is about to wait on it
      	if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
      		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
      		"dispatch_sync called on queue " 
      		"already owned by current thread");
      	}
      	// .... remainder omitted ....
      }
      
      // In the deadlock case this check is certain to return true
      static inline bool
      _dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
      {
      	return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
      }
      static inline bool
      _dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
      {
      	// ^ (XOR): identical bits yield 0, differing bits yield 1, so the
      	// masked result is 0 exactly when lock_value's owner bits equal tid
      	return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
      }
    
异步函数 dispatch_async
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)	
{
	/**
	typedef struct dispatch_continuation_s *dispatch_continuation_t;
	
	typedef struct dispatch_continuation_s {
		DISPATCH_CONTINUATION_HEADER(continuation);
	} *dispatch_continuation_t;
	
	#define DISPATCH_CONTINUATION_HEADER(x) \
		union { \
			const void *do_vtable; \ 
			uintptr_t dc_flags; \
		}; \
		// union: the members are mutually exclusive and share storage
		union { \
			pthread_priority_t dc_priority; \
			int dc_cache_cnt; \
			uintptr_t dc_pad; \
		}; \
		struct voucher_s *dc_voucher; \
		struct dispatch_##x##_s *volatile do_next; \
		dispatch_function_t dc_func; \  // carries the task to execute; the _dispatch_client_callout macro eventually calls back as dc_func(dc_ctxt)
		void *dc_ctxt; \  // stores the continuation's context data, also used when running the task
		void *dc_data; \
		void *dc_other
	#endif
	Async tasks get wrapped into a dispatch_continuation_s object, saved along with the queue's qos.
	*/
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DC_FLAG_CONSUME;
	dispatch_qos_t qos;
	
	// This converts the incoming block (work) into a dispatch_function_t func
	// and stores it function-style via dc->dc_func = func:
	// dispatch_function_t func = _dispatch_Block_invoke(work);
	qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
	
	_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

// Pushes a prepared continuation onto the queue via its vtable's dq_push.
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
	dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
	// Record the push for introspection unless the item opted out.
	if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
		_dispatch_trace_item_push(dqu, dc);
	}
#else
	(void)dc_flags;
#endif
	// dx_push dispatches through the queue's vtable (see below).
	return dx_push(dqu._dq, dc, qos);
}

// dx_push macro definition
// Only when do_vtable's value is less than 127 does the variable count as a
// continuation; tasks dispatched to the main/serial queue are tagged with the
// DISPATCH_OBJ_BARRIER_BIT barrier mark
#define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable)
#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
// Reading this is a bit baffling....... what on earth is dq_push......

// It turns out to be:
DISPATCH_VTABLE_INSTANCE(queue, // base class for queues
	.do_type        = _DISPATCH_QUEUE_CLUSTER,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
);

DISPATCH_VTABLE_INSTANCE(workloop,
	.do_type        = DISPATCH_WORKLOOP_TYPE,
	.do_dispose     = _dispatch_workloop_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_workloop_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_workloop_wakeup,
	.dq_push        = _dispatch_workloop_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
	.do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
	.do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);

#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane,
	.do_type        = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE,
	.do_dispose     = _dispatch_pthread_root_queue_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);
#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, lane,
	.do_type        = DISPATCH_QUEUE_MGR_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
#if DISPATCH_USE_MGR_THREAD
	.do_invoke      = _dispatch_mgr_thread,
#else
	.do_invoke      = _dispatch_object_no_invoke,
#endif

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_mgr_queue_wakeup,
	.dq_push        = _dispatch_mgr_queue_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, lane,
	.do_type        = DISPATCH_QUEUE_MAIN_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_main_queue_wakeup,
	.dq_push        = _dispatch_main_queue_push,
);

#if DISPATCH_COCOA_COMPAT
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, lane,
	.do_type        = DISPATCH_QUEUE_RUNLOOP_TYPE,
	.do_dispose     = _dispatch_runloop_queue_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_runloop_queue_wakeup,
	.dq_push        = _dispatch_lane_push,
);
#endif

DISPATCH_VTABLE_INSTANCE(source,
	.do_type        = DISPATCH_SOURCE_KEVENT_TYPE,
	.do_dispose     = _dispatch_source_dispose,
	.do_debug       = _dispatch_source_debug,
	.do_invoke      = _dispatch_source_invoke,

	.dq_activate    = _dispatch_source_activate,
	.dq_wakeup      = _dispatch_source_wakeup,
	.dq_push        = _dispatch_lane_push,
);

#if HAVE_MACH	
DISPATCH_VTABLE_INSTANCE(mach,
	.do_type        = DISPATCH_MACH_CHANNEL_TYPE,
	.do_dispose     = _dispatch_mach_dispose,
	.do_debug       = _dispatch_mach_debug,
	.do_invoke      = _dispatch_mach_invoke,

	.dq_activate    = _dispatch_mach_activate,
	.dq_wakeup      = _dispatch_mach_wakeup,
	.dq_push        = _dispatch_lane_push,
);
#endif // HAVE_MACH

// From this pile of queue vtables, let's pick one to read!
// We'll follow _dispatch_root_queue_push — the most direct flow,
// because the root queue is the most fundamental one!!!
void _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
	dispatch_qos_t qos)
{
#if DISPATCH_USE_KEVENT_WORKQUEUE
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	if (unlikely(ddi && ddi->ddi_can_stash)) {
		dispatch_object_t old_dou = ddi->ddi_stashed_dou;
		dispatch_priority_t rq_overcommit;
		rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;

		if (likely(!old_dou._do || rq_overcommit)) {
			dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq;
			dispatch_qos_t old_qos = ddi->ddi_stashed_qos;
			ddi->ddi_stashed_rq = rq;
			ddi->ddi_stashed_dou = dou;
			ddi->ddi_stashed_qos = qos;
			_dispatch_debug("deferring item %p, rq %p, qos %d",
				dou._do, rq, qos);
			if (rq_overcommit) {
				ddi->ddi_can_stash = false;
			}
			if (likely(!old_dou._do)) {
				return;
			}
			// push the previously stashed item
			qos = old_qos;
			rq = old_rq;
			dou = old_dou;
		}
	}
#endif
#if HAVE_PTHREAD_WORKQUEUE_QOS
	if (_dispatch_root_queue_push_needs_override(rq, qos)) {
		return _dispatch_root_queue_push_override(rq, dou, qos);
	}
#else
	(void)qos;
#endif
	// After the two checks, this is the call on the main line — the one we
	// follow next (note dou is passed as both head and tail, with n = 1)....
	_dispatch_root_queue_push_inline(rq, dou, dou, 1);
}

static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
	dispatch_object_t _head, dispatch_object_t _tail, int n)
{
	struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
	// Reading the source down to here we hit an unlikely() with no further
	// methods; os_mpsc_push_list is a low-level check — no idea what it does,
	// so we can only look inside the unlikely; I believe it gets taken o(╥﹏╥)o
	if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
		return _dispatch_root_queue_poke(dq, n, 0);
	}
}
// Yet another pile of checks, making the code more robust!!!!!!!
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
	/**
	static inline bool
	_dispatch_queue_class_probe(dispatch_lane_class_t dqu)
	{
		struct dispatch_object_s *tail;
		// seq_cst wrt atomic store to dq_state <rdar://problem/14637483>
		// seq_cst wrt atomic store to dq_flags <rdar://problem/22623242>
		tail = os_atomic_load2o(dqu._dl, dq_items_tail, ordered);
		return unlikely(tail != NULL);
	}
	*/
	if (!_dispatch_queue_class_probe(dq)) {
		return;
	}
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
	if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE))
#endif
	{
		// Taking this branch logs an error; under normal conditions it
		// most likely is not taken
		if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) {
			_dispatch_root_queue_debug("worker thread request still pending "
				"for global queue: %p", dq);
			return;
		}
	}
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
	return _dispatch_root_queue_poke_slow(dq, n, floor);
}
// Requests/creates worker threads to drain a root queue.
// Fix: lines of bare Chinese prose describing the do/while condition sat
// directly inside the function body (invalid C) — now a proper comment;
// the remaining Chinese comments are translated.
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
	// floor = 0, n = 1 — both passed in from the caller
	int remaining = n; // remaining is the number of work requests to satisfy
	int r = ENOSYS; // ENOSYS: function not implemented
	// one-time initialization of the root queues
	_dispatch_root_queues_init();
	// debug logging
	_dispatch_debug_root_queue(dq, __func__);
	// trace the worker request (n cast to uint64_t)
	_dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);

#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
	if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
	{
		_dispatch_root_queue_debug("requesting new worker thread for global "
			"queue: %p", dq);
		r = _pthread_workqueue_addthreads(remaining,
			_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
		(void)dispatch_assume_zero(r);
		return;
	}
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
	dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt;
	if (likely(pqc->dpq_thread_mediator.do_vtable)) {
		while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
			_dispatch_root_queue_debug("signaled sleeping worker for "
				"global queue: %p", dq);
			if (!--remaining) {
				return;
			}
		}
	}

	bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
	if (overcommit) {
		os_atomic_add2o(dq, dgq_pending, remaining, relaxed);
	} else {
		if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) {
			_dispatch_root_queue_debug("worker thread request still pending for "
				"global queue: %p", dq);
			return;
		}
	}

	int can_request, t_count;
	// read the thread pool's remaining capacity from the OS layer
	t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
	// The do/while condition below — !os_atomic_cmpxchgvw2o(dq,
	// dgq_thread_pool_size, t_count, t_count - remaining, &t_count, acquire) —
	// retries until the CAS decrementing the pool size succeeds (its exact
	// low-level implementation is opaque from here).
	do {
		// here floor = 0; can_request is how many threads may still be created
		can_request = t_count < floor ? 0 : t_count - floor;
		// If more threads are requested than can be created, log and clamp:
		// remaining = can_request. Since remaining = 1, if can_request = 0 the
		// first if below fires and sets remaining = 0; then the second if logs
		// "pthread pool is full for root queue" and returns.
		// If remaining <= can_request, neither if fires and on CAS success the
		// loop exits.
		if (remaining > can_request) {
			_dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
				remaining, can_request);
			os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
			remaining = can_request;
		}
		
		if (remaining == 0) {
			_dispatch_root_queue_debug("pthread pool is full for root queue: "
				"%p", dq);
			return;
		}
	} while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count,
		t_count - remaining, &t_count, acquire));

	pthread_attr_t *attr = &pqc->dpq_thread_attr;
	pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
	if (unlikely(dq == &_dispatch_mgr_root_queue)) {
		pthr = _dispatch_mgr_root_queue_init();
	}
#endif
	// After exiting the while loop above, create `remaining` threads in a loop
	do {
		_dispatch_retain(dq); // released in _dispatch_worker_thread
		// thread creation and pool maintenance:
		// r = pthread_create(pthr, attr, _dispatch_worker_thread, dq)
		while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
			if (r != EAGAIN) { // EAGAIN: resource shortage, retry
				(void)dispatch_assume_zero(r);
			}
			_dispatch_temporary_resource_shortage();
		}
	} while (--remaining);
#else
	(void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
}
  • 上面我们看到了队列的创建,以及对回调的block的func处理,函数式保存!那么,函数的调用是在什么时机呢?
    GCD任务块的调用时机

      // First, _dispatch_lane_barrier_sync_invoke_and_complete — look familiar?
      // In the deadlock code above, inside _dispatch_barrier_sync_f_inline(),
      // a comment noted that this is the method called when there is NO deadlock!!!
      
      static void
      _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
      void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
      {
      	// Key line: this performs the actual invocation
      	_dispatch_sync_function_invoke_inline(dq, ctxt, func);
      	_dispatch_trace_item_complete(dc);
      	if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
      		return _dispatch_lane_barrier_complete(dq, 0, 0);
      	}
      	
      	// The code below updates the queue's various state bits
      	const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
      	DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
      	DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
      	DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
      	uint64_t old_state, new_state;
      	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
      		new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
      		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
      		new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
      		if (unlikely(old_state & fail_unlock_mask)) {
      			os_atomic_rmw_loop_give_up({
      				return _dispatch_lane_barrier_complete(dq, 0, 0);
      			});
      		}
      	});
      	if (_dq_state_is_base_wlh(old_state)) {
      		_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
      	}
      }
    
      static inline void
      _dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
      dispatch_function_t func)
      {
      	dispatch_thread_frame_s dtf;
      	_dispatch_thread_frame_push(&dtf, dq);
      	// f(ctxt) -- func(ctxt)
      	// the method seen in the call stack; right after it comes
      	// -[ViewController viewDidLoad]_block_invoke
      	_dispatch_client_callout(ctxt, func);
      	_dispatch_perfmon_workitem_inc();
      	_dispatch_thread_frame_pop(&dtf);
      }		
      
      void
      _dispatch_client_callout(void *ctxt, dispatch_function_t f)
      {
      	_dispatch_get_tsd_base();
      	void *u = _dispatch_get_unwind_tsd();
      	if (likely(!u)) return f(ctxt);
      	_dispatch_set_unwind_tsd(NULL);
      	
      	// invoke the work item, passing back the context ctxt
      	f(ctxt);
      	_dispatch_free_unwind_tsd();
      	_dispatch_set_unwind_tsd(u);
      }
      // That was the synchronous task; following the demo's call stack we found
      // the same call path in the source — async tasks work the same way.
      // Let's look at the async task's call stack; a picture follows!!!
    

异步任务堆栈调用信息

	// First, look at this method's invocation:
	void
	_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic,
	dispatch_invoke_flags_t flags)
	{
		_dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2);
	}
	// The method above passes the static function _dispatch_lane_invoke2 as a parameter
	static inline dispatch_queue_wakeup_target_t
	_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic,
	dispatch_invoke_flags_t flags, uint64_t *owned)
	{
		dispatch_queue_t otq = dq->do_targetq;
		dispatch_queue_t cq = _dispatch_queue_get_current();

		if (unlikely(cq != otq)) {
			return otq;
		}
		// serial (dq_width == 1): does not spin up a new thread
		if (dq->dq_width == 1) {
			return _dispatch_lane_serial_drain(dq, dic, flags, owned);
		}
		// concurrent: goes on to spin up new threads
		return _dispatch_lane_concurrent_drain(dq, dic, flags, owned);
	}

	// concurrent
	static dispatch_queue_wakeup_target_t
	_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu,
	dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
	uint64_t *owned)	
	{
		return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false);
	}
	// serial
	dispatch_queue_wakeup_target_t
	_dispatch_lane_serial_drain(dispatch_lane_class_t dqu,
	dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
	uint64_t *owned)
	{
		flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
		return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true);
	}
	
	// Concurrent and serial drains both call this same method.
	// NOTE: this excerpt elides the drain loop's `while` header, which is why
	// bare `break`/`continue` statements and an extra closing brace appear below.
	// Fixes: the bare "........." elision marker, a bare Chinese line before the
	// suspension check, and stray Chinese text after a statement terminator were
	// not valid C — all are now comments; a mis-indented crash call is re-indented.
	static dispatch_queue_wakeup_target_t
	_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic,
	dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain)
	{
		dispatch_queue_t orig_tq = dq->do_targetq;
		dispatch_thread_frame_s dtf;
		struct dispatch_object_s *dc = NULL, *next_dc;
		uint64_t dq_state, owned = *owned_ptr;

		if (unlikely(!dq->dq_items_tail)) return NULL;

		_dispatch_thread_frame_push(&dtf, dq);
		if (serial_drain || _dq_state_is_in_barrier(owned)) {
			// we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL`
			// but width can change while draining barrier work items, so we only
			// convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER`
			owned = DISPATCH_QUEUE_IN_BARRIER;
		} else {
			owned &= DISPATCH_QUEUE_WIDTH_MASK;
		}

		dc = _dispatch_queue_get_head(dq);
		// jump straight in with goto
		goto first_iteration;
		// ......... code elided: the handling when dc does not exist

	first_iteration:
			dq_state = os_atomic_load(&dq->dq_state, relaxed);
			// check whether the queue state is suspended
			if (unlikely(_dq_state_is_suspended(dq_state))) {
				break;
			}
			// tq is not the target tq.......
			if (unlikely(orig_tq != dq->do_targetq)) {
				break;
			}
			// Here is the serial vs. concurrent handling:
			// serial_drain is true when called via _dispatch_lane_serial_drain,
			// and false when called via _dispatch_lane_concurrent_drain
			if (serial_drain || _dispatch_object_is_barrier(dc)) {
				// serial_drain is true (or the item is a barrier)
				if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { // barrier
					if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) {
						// return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
						goto out_with_no_width;
					}
					owned = DISPATCH_QUEUE_IN_BARRIER;
				}
				// synchronous barrier waiter
				if (_dispatch_object_is_sync_waiter(dc) &&!(flags & DISPATCH_INVOKE_THREAD_BOUND)) {
					dic->dic_barrier_waiter = dc;
					goto out_with_barrier_waiter;
				}
				next_dc = _dispatch_queue_pop_head(dq, dc);
			} else {
				// serial_drain is false
				if (owned == DISPATCH_QUEUE_IN_BARRIER) { // barrier
					os_atomic_xor2o(dq, dq_state, owned, release);
					owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
				} else if (unlikely(owned == 0)) {
					if (_dispatch_object_is_waiter(dc)) {
						// synchronous tasks do not take this path
						// sync "readers" don't observe the limit
						_dispatch_queue_reserve_sync_width(dq);
					} else if (!_dispatch_queue_try_acquire_async(dq)) {
						// return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; waits forever
						goto out_with_no_width;
					}
					owned = DISPATCH_QUEUE_WIDTH_INTERVAL;
				}

				next_dc = _dispatch_queue_pop_head(dq, dc);
				if (_dispatch_object_is_waiter(dc)) {
					owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
					_dispatch_non_barrier_waiter_redirect_or_wake(dq, dc);
					continue;
				}
				// redirect
				if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) {
					owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
					_dispatch_continuation_redirect_push(dq, dc,
					_dispatch_queue_max_qos(dq));
					continue;
				}
			}
			// the actual invocation
			_dispatch_continuation_pop_inline(dc, dic, flags, dq);
		} // closes the drain loop whose header was elided above

		if (owned == DISPATCH_QUEUE_IN_BARRIER) {
			// if we're IN_BARRIER we really own the full width too
			owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
		}
		if (dc) {
			owned = _dispatch_queue_adjust_owned(dq, owned, dc);
		}
		*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
		*owned_ptr |= owned;
		_dispatch_thread_frame_pop(&dtf);
		return dc ? dq->do_targetq : NULL;

	out_with_no_width:
		*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
		_dispatch_thread_frame_pop(&dtf);
		return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;

	out_with_barrier_waiter:
		if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
			DISPATCH_INTERNAL_CRASH(0,
					"Deferred continuation on source, mach channel or mgr");
		}
		_dispatch_thread_frame_pop(&dtf);
		return dq->do_targetq;
	}

	static inline void
	_dispatch_continuation_pop_inline(dispatch_object_t dou,
	dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
	dispatch_queue_class_t dqu)
	{
		dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
		_dispatch_get_pthread_root_queue_observer_hooks();
		if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq);
		flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
		if (_dispatch_object_has_vtable(dou)) {//
			// special case: the object already has a vtable, so invoke directly
			dx_invoke(dou._dq, dic, flags);
		} else {
			// the common case we are interested in
			_dispatch_continuation_invoke_inline(dou, flags, dqu);
		}
		if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq);
	}
	
	static inline void
	_dispatch_continuation_invoke_inline(dispatch_object_t dou,
	dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
	{
		dispatch_continuation_t dc = dou._dc, dc1;
		dispatch_invoke_with_autoreleasepool(flags, {
			uintptr_t dc_flags = dc->dc_flags;
	
			_dispatch_continuation_voucher_adopt(dc, dc_flags);
			if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
				_dispatch_trace_item_pop(dqu, dou);
			}
			if (dc_flags & DC_FLAG_CONSUME) {
				dc1 = _dispatch_continuation_free_cacheonly(dc);
			} else {
				dc1 = NULL;
			}
			if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) {// dispatch group
				_dispatch_continuation_with_group_invoke(dc);
			} else {
				// looks familiar — the method we saw in the call stack
				_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
				_dispatch_trace_item_complete(dc);
			}
			if (unlikely(dc1)) {
				_dispatch_continuation_free_to_cache_limit(dc1);
			}
		});
		_dispatch_perfmon_workitem_inc();
	}

	void
	_dispatch_client_callout(void *ctxt, dispatch_function_t f)
	{
		_dispatch_get_tsd_base();
		void *u = _dispatch_get_unwind_tsd();
		if (likely(!u)) return f(ctxt);
		_dispatch_set_unwind_tsd(NULL);
		// the func invocation
		f(ctxt);
		_dispatch_free_unwind_tsd();
		_dispatch_set_unwind_tsd(u);
	}
	// This is where the task block gets invoked.
	// However, when we look at the call stack, the method shown is this one:
	void
	_dispatch_call_block_and_release(void *block)
	{
		void (^b)(void) = block;// cast back to a block
		b();// invoke
		Block_release(b);// release
	}
	// So where does _dispatch_call_block_and_release get installed?
	// Remember the qos assignment inside dispatch_async above:
	// qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
	static inline dispatch_qos_t
	_dispatch_continuation_init(dispatch_continuation_t dc,
	dispatch_queue_class_t dqu, dispatch_block_t work,
	dispatch_block_flags_t flags, uintptr_t dc_flags)
	{
		void *ctxt = _dispatch_Block_copy(work);

		dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
		if (unlikely(_dispatch_block_has_private_data(work))) {
			dc->dc_flags = dc_flags;
			dc->dc_ctxt = ctxt;
			// will initialize all fields but requires dc_flags & dc_ctxt to be set
			return _dispatch_continuation_init_slow(dc, dqu, flags);
		}

		dispatch_function_t func = _dispatch_Block_invoke(work);
		if (dc_flags & DC_FLAG_CONSUME) {
			// store _dispatch_call_block_and_release function-style
			func = _dispatch_call_block_and_release;
		}
		return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
	}
	// So when f(ctxt) is called later, it runs _dispatch_call_block_and_release!!!!
  • 这里面我们说了线程创建r = pthread_create(),任务块封装为func以及任务块的调用!
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值