Adreno source code series (7): adreno_dispatcher

static void adreno_dispatcher_work(struct kthread_work *work)
{
	struct adreno_dispatcher *dispatcher =
		container_of(work, struct adreno_dispatcher, work);
	struct adreno_device *adreno_dev =
		container_of(dispatcher, struct adreno_device, dispatcher);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int count = 0;
	unsigned int i = 0;

	mutex_lock(&dispatcher->mutex);

	/*
	 * As long as there are inflight commands, process retired commands from
	 * all drawqueues
	 */
    // Iterate over every adreno_ringbuffer owned by the adreno_device
	for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
        // Get the ringbuffer's command queue: adreno_dispatcher_drawqueue
		struct adreno_dispatcher_drawqueue *drawqueue =
			DRAWQUEUE(&adreno_dev->ringbuffers[i]);

        // [see section 1]
		count += adreno_dispatch_process_drawqueue(adreno_dev,
			drawqueue);
		if (dispatcher->inflight == 0)
			break;
	}

    // [see section 4]
	kgsl_process_event_groups(device);

	/*
	 * dispatcher_do_fault() returns 0 if no faults occurred. If that is the
	 * case, then clean up preemption and try to schedule more work
	 */
	if (dispatcher_do_fault(adreno_dev) == 0) {

		/* Clean up after preemption */
		if (gpudev->preemption_schedule)
			gpudev->preemption_schedule(adreno_dev);

		/* Run the scheduler to dispatch new commands */
        // [see section 5]
		_adreno_dispatcher_issuecmds(adreno_dev);
	}

	/*
	 * If there are commands pending, update the timers, otherwise release
	 * the power state to prepare for power down
	 */
	if (dispatcher->inflight > 0)
		_dispatcher_update_timers(adreno_dev);
	else
		_dispatcher_power_down(adreno_dev);

	mutex_unlock(&dispatcher->mutex);
}
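
adreno_dispatcher_work() is not called directly; it runs on the dispatcher's kthread worker and is queued whenever new commands arrive or an interrupt retires work. A minimal sketch of that kthread_work pattern, using illustrative names (my_dispatcher, my_worker) around the standard kthread_create_worker/kthread_init_work/kthread_queue_work APIs:

#include <linux/err.h>
#include <linux/kthread.h>

/* Illustrative stand-ins for the dispatcher and its worker */
struct my_dispatcher {
	struct kthread_work work;
};

static struct my_dispatcher my_disp;
static struct kthread_worker *my_worker;

static void my_dispatcher_work(struct kthread_work *work)
{
	struct my_dispatcher *disp =
		container_of(work, struct my_dispatcher, work);

	/* retire finished commands, issue new ones, ... */
	(void)disp;
}

static int my_dispatcher_init(void)
{
	my_worker = kthread_create_worker(0, "my_dispatcher");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	kthread_init_work(&my_disp.work, my_dispatcher_work);
	return 0;
}

/* Called whenever new commands are queued or an IRQ retires work */
static void my_dispatcher_schedule(void)
{
	kthread_queue_work(my_worker, &my_disp.work);
}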

1. adreno_dispatch_process_drawqueue

static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
		struct adreno_dispatcher_drawqueue *drawqueue)
{
    // Retire the commands that the GPU has already finished [see section 2]
	int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);

	/* Nothing to do if there are no pending commands */
    // If the ringbuffer's command queue has no pending commands, return the number of commands retired
	if (adreno_drawqueue_is_empty(drawqueue))
		return count;

	/* Don't update the drawqueue timeout if it isn't active */
    // Check whether this drawqueue's ringbuffer is the one currently active on the adreno_device
	if (!drawqueue_is_current(drawqueue))
		return count;

	/*
	 * If the current ringbuffer retired any commands then universally
	 * reset the timeout
	 */
    // If the current ringbuffer retired any commands, push the timeout out by adreno_drawobj_timeout (2 s by default)
	if (count) {
		drawqueue->expires = jiffies +
			msecs_to_jiffies(adreno_drawobj_timeout);
		return count;
	}

	/*
	 * If we get here then 1) the ringbuffer is current and 2) we haven't
	 * retired anything.  Check to see if the timeout is valid for the
	 * current drawobj and fault if it has expired
	 */
    // Check for a timeout [see section 3]
	_adreno_dispatch_check_timeout(adreno_dev, drawqueue);
	return 0;
}
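
For reference, drawqueue_is_current() just checks whether the ringbuffer that embeds this drawqueue (as dispatch_q) is the ringbuffer the device is currently running. A sketch of that check; the macro and field names follow the driver but may differ between kernel versions:

static bool drawqueue_is_current(struct adreno_dispatcher_drawqueue *drawqueue)
{
	/* The drawqueue is embedded in its ringbuffer as dispatch_q */
	struct adreno_ringbuffer *rb = container_of(drawqueue,
		struct adreno_ringbuffer, dispatch_q);
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);

	/* Current only if this ringbuffer is the one the GPU is executing */
	return (adreno_dev->cur_rb == rb);
}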

2. adreno_dispatch_retire_drawqueue

static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
		struct adreno_dispatcher_drawqueue *drawqueue)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	int count = 0;

    // Loop while the ringbuffer's command queue is not empty
	while (!adreno_drawqueue_is_empty(drawqueue)) {
        // Take the command at the head of the queue
		struct kgsl_drawobj_cmd *cmdobj =
			drawqueue->cmd_q[drawqueue->head];
		struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

        // Check whether the GPU has retired this command's timestamp; if not, stop here
		if (!kgsl_check_timestamp(device, drawobj->context,
			drawobj->timestamp))
			break;

        // Otherwise retire (destroy) this command [see section 2.1]
		retire_cmdobj(adreno_dev, cmdobj);

		dispatcher->inflight--;
		drawqueue->inflight--;

        // Clear the head slot of the command queue
		drawqueue->cmd_q[drawqueue->head] = NULL;

        // Advance the head index by one (with wraparound)
		drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
			ADRENO_DISPATCH_DRAWQUEUE_SIZE);

        // Count one more retired command
		count++;
	}

	return count;
}
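
cmd_q is a fixed-size circular buffer, so retiring only needs to clear the head slot and advance the head index with wraparound. A sketch of the DRAWQUEUE_NEXT step together with a hypothetical producer-side helper (drawqueue_push is not in the driver, it is only here to show how head and tail cooperate):

/* Advance a circular-buffer index by one, wrapping at the queue size;
 * as far as I can tell this matches the driver's DRAWQUEUE_NEXT.
 */
#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))

/* Illustrative producer side (not the driver's code): place a cmdobj at
 * the tail and advance it, leaving one slot free to tell full from empty.
 */
static int drawqueue_push(struct adreno_dispatcher_drawqueue *drawqueue,
		struct kgsl_drawobj_cmd *cmdobj)
{
	unsigned int next = DRAWQUEUE_NEXT(drawqueue->tail,
		ADRENO_DISPATCH_DRAWQUEUE_SIZE);

	/* Full when advancing the tail would collide with the head */
	if (next == drawqueue->head)
		return -ENOSPC;

	drawqueue->cmd_q[drawqueue->tail] = cmdobj;
	drawqueue->tail = next;
	return 0;
}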

2.1 retire_cmdobj

static void retire_cmdobj(struct adreno_device *adreno_dev,
		struct kgsl_drawobj_cmd *cmdobj)
{
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
	struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
	struct adreno_ringbuffer *rb = drawctxt->rb;
	struct kgsl_context *context = drawobj->context;
	uint64_t start = 0, end = 0;
	struct retire_info info = {0};

	if (cmdobj->fault_recovery != 0) {
		set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
		_print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);
	}

    // If profiling was requested, read the GPU start-of-pipe and end-of-pipe ticks for this command
	if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv))
		cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end);

	info.inflight = (int)dispatcher->inflight;
	info.rb_id = rb->id;
	info.wptr = rb->wptr;
	info.timestamp = drawobj->timestamp;
	info.sop = start;
	info.eop = end;

	msm_perf_events_update(MSM_PERF_GFX, MSM_PERF_RETIRED,
			       pid_nr(context->proc_priv->pid),
			       context->id, drawobj->timestamp,
			       !!(drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME));

	if (drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
		atomic64_inc(&context->proc_priv->frame_count);

	/*
	 * For A3xx we still get the rptr from the CP_RB_RPTR instead of
	 * rptr scratch out address. At this point GPU clocks turned off.
	 * So avoid reading GPU register directly for A3xx.
	 */
	if (adreno_is_a3xx(adreno_dev)) {
		trace_adreno_cmdbatch_retired(drawobj->context, &info,
			drawobj->flags, rb->dispatch_q.inflight,
			cmdobj->fault_recovery);
	} else {
		info.rptr = adreno_get_rptr(rb);
        // Emit the retire tracepoint, this time including the current rptr
		trace_adreno_cmdbatch_retired(drawobj->context, &info,
			drawobj->flags, rb->dispatch_q.inflight,
			cmdobj->fault_recovery);
	}

    // Record how long this command took from submission to retirement; submit_ticks is set at sendcmd time
	drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
		end - cmdobj->submit_ticks;

    // Advance the ring index for the latency samples
	drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
		SUBMIT_RETIRE_TICKS_SIZE;

    // Destroy the kgsl_drawobj
	kgsl_drawobj_destroy(drawobj);
}
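
submit_retire_ticks is a small ring of the most recent submit-to-retire durations in GPU ticks, indexed by ticks_index. The driver uses these samples to estimate how long queued work will take; a hypothetical helper that averages the ring, purely for illustration:

/* Illustrative helper, not in the driver: average the recorded
 * submit-to-retire durations (in GPU ticks) for a context.
 */
static uint64_t drawctxt_avg_retire_ticks(struct adreno_context *drawctxt)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < SUBMIT_RETIRE_TICKS_SIZE; i++)
		sum += drawctxt->submit_retire_ticks[i];

	return sum / SUBMIT_RETIRE_TICKS_SIZE;
}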

3. _adreno_dispatch_check_timeout

static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
		struct adreno_dispatcher_drawqueue *drawqueue)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_drawobj *drawobj =
			DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);

	/* Don't timeout if the timer hasn't expired yet (duh) */
    // Bail out if the drawqueue timeout (2 s by default) has not expired yet
	if (time_is_after_jiffies(drawqueue->expires))
		return;

	/* Don't timeout if the IB timeout is disabled globally */
	if (!adreno_long_ib_detect(adreno_dev))
		return;

	/* Don't time out if the context has disabled it */
    // The context has opted out of fault tolerance, so don't fault on timeout
	if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
		return;

	pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
		drawobj->context->id, drawobj->timestamp);

    // Flag a GPU timeout fault
	adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);

	/*
	 * This makes sure dispatcher doesn't run endlessly in cases where
	 * we couldn't run recovery
	 */
    // Reset the timeout so the dispatcher doesn't fault again immediately
	drawqueue->expires = jiffies + msecs_to_jiffies(adreno_drawobj_timeout);
}
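
The timeout bookkeeping is plain jiffies arithmetic: expires is an absolute jiffies deadline set adreno_drawobj_timeout milliseconds (2000 by default) into the future, and time_is_after_jiffies() stays true until that deadline passes. A minimal sketch of the same pattern with hypothetical helpers:

#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long my_expires;

/* Arm a 2000 ms deadline the same way the dispatcher does */
static void my_arm_timeout(void)
{
	my_expires = jiffies + msecs_to_jiffies(2000);
}

/* Returns true once the deadline has passed */
static bool my_timeout_expired(void)
{
	return !time_is_after_jiffies(my_expires);
}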

4. kgsl_process_event_groups

void kgsl_process_event_groups(struct kgsl_device *device)
{
	struct kgsl_event_group *group;

	read_lock(&device->event_groups_lock);
    // Walk every kgsl_event_group registered on the kgsl_device
	list_for_each_entry(group, &device->event_groups, group)
		_process_event_group(device, group, false);
	read_unlock(&device->event_groups_lock);
}

4.1 _process_event_group

static void _process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group, bool flush)
{
	struct kgsl_event *event, *tmp;
	unsigned int timestamp;
	struct kgsl_context *context;

	if (group == NULL)
		return;

	context = group->context;

	/*
	 * Sanity check to be sure that we aren't racing with the context
	 * getting destroyed
	 */
	if (WARN_ON(context != NULL && !_kgsl_context_get(context)))
		return;

	spin_lock(&group->lock);

	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&timestamp);

	if (!flush && !_do_process_group(group->processed, timestamp))
		goto out;

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(event->timestamp, timestamp) <= 0)
			signal_event(device, event, KGSL_EVENT_RETIRED);
		else if (flush)
			signal_event(device, event, KGSL_EVENT_CANCELLED);

	}

	group->processed = timestamp;

out:
	spin_unlock(&group->lock);
	kgsl_context_put(context);
}
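
timestamp_cmp() cannot be a plain integer comparison because KGSL timestamps are 32-bit counters that eventually wrap. A sketch of a wraparound-aware comparison in the same spirit; the window constant and exact tie-breaking in the driver may differ:

#define MY_TIMESTAMP_WINDOW 0x80000000

/* Returns 0 if equal, 1 if a is "newer" than b, -1 otherwise, treating
 * unsigned differences smaller than the window as not having wrapped.
 */
static int my_timestamp_cmp(unsigned int a, unsigned int b)
{
	if (a == b)
		return 0;

	/* a is ahead of b if the unsigned difference is within the window,
	 * which also handles a having wrapped past b.
	 */
	if (a - b < MY_TIMESTAMP_WINDOW)
		return 1;

	return -1;
}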

5. _adreno_dispatcher_issuecmds

/**
 * _adreno_dispatcher_issuecmds() - Issue commands from pending contexts
 * @adreno_dev: Pointer to the adreno device struct
 *
 * Issue as many commands as possible (up to inflight) from the pending contexts
 * This function assumes the dispatcher mutex has been locked.
 */
static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	int i;

	/* Leave early if the dispatcher isn't in a happy state */
	if (adreno_gpu_fault(adreno_dev) != 0)
		return;

	for (i = 0; i < ARRAY_SIZE(dispatcher->jobs); i++)
		dispatcher_handle_jobs(adreno_dev, i);
}

5.1 dispatcher_handle_jobs

static void dispatcher_handle_jobs(struct adreno_device *adreno_dev, int id)
{
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	unsigned long map[BITS_TO_LONGS(KGSL_MEMSTORE_MAX)];
	struct llist_node *requeue, *jobs;

	memset(map, 0, sizeof(map));

	requeue = llist_del_all(&dispatcher->requeue[id]);
	jobs = llist_del_all(&dispatcher->jobs[id]);

	dispatcher_handle_jobs_list(adreno_dev, id, map, requeue);
	dispatcher_handle_jobs_list(adreno_dev, id, map, jobs);
}
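
jobs[] and requeue[] are lock-free lists (struct llist_head), one per context priority level. Producers push entries with llist_add() from any context, and the dispatcher atomically drains a whole list with llist_del_all() before walking it. A minimal sketch of that producer/consumer pattern with an illustrative job type:

#include <linux/llist.h>

struct my_job {
	int priority;
	struct llist_node node;
};

static struct llist_head my_jobs;

/* Producer side: safe to call concurrently from any context */
static void my_queue_job(struct my_job *job)
{
	llist_add(&job->node, &my_jobs);
}

/* Consumer side: atomically take everything queued so far */
static void my_handle_jobs(void)
{
	struct llist_node *list = llist_del_all(&my_jobs);
	struct my_job *job, *tmp;

	llist_for_each_entry_safe(job, tmp, list, node) {
		/* process or requeue the job */
	}
}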