/*
 * Main dispatcher worker, run on the dispatcher kthread. Retires finished
 * commands from every ringbuffer's drawqueue, handles faults, issues new
 * commands, and updates timers or drops the power vote. Runs entirely under
 * dispatcher->mutex.
 */
static void adreno_dispatcher_work(struct kthread_work *work)
{
	/* Recover the dispatcher, then the owning device, from the work item */
	struct adreno_dispatcher *dispatcher =
		container_of(work, struct adreno_dispatcher, work);
	struct adreno_device *adreno_dev =
		container_of(dispatcher, struct adreno_device, dispatcher);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int count = 0;
	unsigned int i = 0;

	mutex_lock(&dispatcher->mutex);

	/*
	 * As long as there are inflight commands, process retired commands from
	 * all drawqueues
	 */
	/* Walk every adreno_ringbuffer owned by the adreno_device */
	for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
		/* Fetch the ringbuffer's command queue (its drawqueue) */
		struct adreno_dispatcher_drawqueue *drawqueue =
			DRAWQUEUE(&adreno_dev->ringbuffers[i]);

		/* Retire finished work and check for timeouts (see section 1) */
		count += adreno_dispatch_process_drawqueue(adreno_dev,
			drawqueue);
		/* Nothing left in flight anywhere - stop scanning early */
		if (dispatcher->inflight == 0)
			break;
	}

	/* Signal retired/cancelled events to waiters (see section 4) */
	kgsl_process_event_groups(device);

	/*
	 * dispatcher_do_fault() returns 0 if no faults occurred. If that is the
	 * case, then clean up preemption and try to schedule more work
	 */
	if (dispatcher_do_fault(adreno_dev) == 0) {

		/* Clean up after preemption (hook is optional per-GPU) */
		if (gpudev->preemption_schedule)
			gpudev->preemption_schedule(adreno_dev);

		/* Run the scheduler to dispatch new commands (see section 5) */
		_adreno_dispatcher_issuecmds(adreno_dev);
	}

	/*
	 * If there are commands pending, update the timers, otherwise release
	 * the power state to prepare for power down
	 */
	if (dispatcher->inflight > 0)
		_dispatcher_update_timers(adreno_dev);
	else
		_dispatcher_power_down(adreno_dev);

	mutex_unlock(&dispatcher->mutex);
}
1. adreno_dispatch_process_drawqueue
staticintadreno_dispatch_process_drawqueue(structadreno_device*adreno_dev,structadreno_dispatcher_drawqueue*drawqueue){// 移除已过期的命令[见第2节]int count =adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);/* Nothing to do if there are no pending commands */// 如果adreno_ringbuffer的命令队列没有有效的命令, 则返回处理的已过期命令的数量if(adreno_drawqueue_is_empty(drawqueue))return count;/* Don't update the drawqueue timeout if it isn't active */// 判断命令对立的adreno_ringbuffer与adreno_device的是否是同一个if(!drawqueue_is_current(drawqueue))return count;/*
* If the current ringbuffer retired any commands then universally
* reset the timeout
*/// 如果当前adreno_ringbuffer有移除的命令, 则需要更新超时时间为2sif(count){
drawqueue->expires = jiffies +msecs_to_jiffies(adreno_drawobj_timeout);return count;}/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout if valid for the
* current drawobj and fault if it has expired
*/// 检测超时[见第3节]_adreno_dispatch_check_timeout(adreno_dev, drawqueue);return0;}
staticvoidretire_cmdobj(structadreno_device*adreno_dev,structkgsl_drawobj_cmd*cmdobj){structadreno_dispatcher*dispatcher =&adreno_dev->dispatcher;structkgsl_drawobj*drawobj =DRAWOBJ(cmdobj);structadreno_context*drawctxt =ADRENO_CONTEXT(drawobj->context);structadreno_ringbuffer*rb = drawctxt->rb;structkgsl_context*context = drawobj->context;uint64_t start =0, end =0;structretire_info info ={0};if(cmdobj->fault_recovery !=0){set_bit(ADRENO_CONTEXT_FAULT,&drawobj->context->priv);_print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);}// profiling这条命令GPU开始处理的起始和结束时间if(test_bit(CMDOBJ_PROFILE,&cmdobj->priv))cmdobj_profile_ticks(adreno_dev, cmdobj,&start,&end);
info.inflight =(int)dispatcher->inflight;
info.rb_id = rb->id;
info.wptr = rb->wptr;
info.timestamp = drawobj->timestamp;
info.sop = start;
info.eop = end;msm_perf_events_update(MSM_PERF_GFX, MSM_PERF_RETIRED,pid_nr(context->proc_priv->pid),
context->id, drawobj->timestamp,!!(drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME));if(drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME)atomic64_inc(&context->proc_priv->frame_count);/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
* rptr scratch out address. At this point GPU clocks turned off.
* So avoid reading GPU register directly for A3xx.
*/if(adreno_is_a3xx(adreno_dev)){trace_adreno_cmdbatch_retired(drawobj->context,&info,
drawobj->flags, rb->dispatch_q.inflight,
cmdobj->fault_recovery);}else{
info.rptr =adreno_get_rptr(rb);// tracetrace_adreno_cmdbatch_retired(drawobj->context,&info,
drawobj->flags, rb->dispatch_q.inflight,
cmdobj->fault_recovery);}// 记录这条被销毁的命令从提交到结束处理的时间:submit_ticks在sendcmd时被更新
drawctxt->submit_retire_ticks[drawctxt->ticks_index]=
end - cmdobj->submit_ticks;// 命令的索引加1
drawctxt->ticks_index =(drawctxt->ticks_index +1)%
SUBMIT_RETIRE_TICKS_SIZE;// 销毁kgsl_drawobjkgsl_drawobj_destroy(drawobj);}
3. _adreno_dispatch_check_timeout
staticvoid_adreno_dispatch_check_timeout(structadreno_device*adreno_dev,structadreno_dispatcher_drawqueue*drawqueue){structkgsl_device*device =KGSL_DEVICE(adreno_dev);structkgsl_drawobj*drawobj =DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);/* Don't timeout if the timer hasn't expired yet (duh) */// 判断是否2s超时if(time_is_after_jiffies(drawqueue->expires))return;/* Don't timeout if the IB timeout is disabled globally */if(!adreno_long_ib_detect(adreno_dev))return;/* Don't time out if the context has disabled it */// adreno_context不允许超时if(drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)return;pr_context(device, drawobj->context,"gpu timeout ctx %d ts %d\n",
drawobj->context->id, drawobj->timestamp);// 设置GPU超时adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);/*
* This makes sure dispatcher doesn't run endlessly in cases where
* we couldn't run recovery
*/// 重新更新超时时间
drawqueue->expires = jiffies +msecs_to_jiffies(adreno_drawobj_timeout);}
/*
 * Process one event group: read its retired timestamp and signal every
 * event at or before it as RETIRED (or cancel later ones when flushing).
 * Takes a reference on the group's context for the duration and holds
 * group->lock across the event list walk.
 */
static void _process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group, bool flush)
{
	struct kgsl_event *event, *tmp;
	unsigned int timestamp;
	struct kgsl_context *context;

	if (group == NULL)
		return;

	context = group->context;

	/*
	 * Sanity check to be sure that we aren't racing with the context
	 * getting destroyed
	 */
	if (WARN_ON(context != NULL && !_kgsl_context_get(context)))
		return;

	spin_lock(&group->lock);

	/* Ask the group's backend for the last retired timestamp */
	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&timestamp);

	/* Unless flushing, skip work if the timestamp hasn't advanced */
	if (!flush && !_do_process_group(group->processed, timestamp))
		goto out;

	/* _safe variant: signal_event may remove entries from the list */
	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(event->timestamp, timestamp) <= 0)
			signal_event(device, event, KGSL_EVENT_RETIRED);
		else if (flush)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	group->processed = timestamp;

out:
	spin_unlock(&group->lock);
	/* Drop the reference taken above (safe if context is NULL) */
	kgsl_context_put(context);
}
5. _adreno_dispatcher_issuecmds
/**
* _adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
* @adreno_dev: Pointer to the adreno device struct
*
* Issue as many commands as possible (up to inflight) from the pending contexts
* This function assumes the dispatcher mutex has been locked.
*/staticvoid_adreno_dispatcher_issuecmds(structadreno_device*adreno_dev){structadreno_dispatcher*dispatcher =&adreno_dev->dispatcher;int i;/* Leave early if the dispatcher isn't in a happy state */if(adreno_gpu_fault(adreno_dev)!=0)return;for(i =0; i <ARRAY_SIZE(dispatcher->jobs); i++)dispatcher_handle_jobs(adreno_dev, i);}