在上一期,我们看到了多个输入如何被封装,然后被塞入llm_engine中,接下来,通过_run_engine,我们要进行输入的处理了。
def _run_engine(
self, *, use_tqdm: bool
) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
# Initialize tqdm.
if use_tqdm:
num_requests = self.llm_engine.get_num_unfinished_requests()
pbar = tqdm(
total=num_requests,
desc="Processed prompts",
dynamic_ncols=True,
postfix=f"Generation Speed: {0:.2f} toks/s",
)
# Run the engine.
outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = []
total_toks = 0
while self.llm_engine.has_unfinished_requests():
step_outputs = self.llm_engine.step()
for output in step_outputs:
if output.finished:
outputs.append(output)
if use_tqdm:
if isinstance(output, RequestOutput):
# Calculate tokens only for RequestOutput
total_toks += sum(
len(stp.token_ids) for stp in output.outputs)
spd = total_toks / pbar.format_dict["elapsed"]
pbar.postfix = f"Generation Speed: {spd:.2f} toks/s"
pbar.update(1)
if use_tqdm:
pbar.close()
# Sort the outputs by request ID.
# This is necessary because some requests may be finished earlier than
# its previous requests.
return sorted(outputs, key=lambda x: int(x.request_id))
其中,主要的函数有两个,一个是
seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()
另一个是 model_executor.execute_model
output = self.model_executor.execute_model(
execute_model_req=execute_model_req)
该函数的调用非常深,涉及到底层缓存的管理。
看到这里,我已经失去耐心了,meta几个文件的代码怎么被搞得这么复杂。
所以,我们接着看,关于model_executor.execute_model最重要的内容,那就是模型的推理。对应函数
output = self.model_executor.execute_model(
execute_model_req=execute_model_req)
该函数具体包含三部分:model_executable、compute_logits 以及 sample
hidden_states = model_executable(
input_ids=input_tokens,
positions=input_positions,
kv_caches=kv_caches,
attn_metadata=attn_metadata,
**multi_modal_kwargs,
)
# Compute the logits.
logits = self.model.compute_logits(hidden_states, sampling_metadata)
# Only perform sampling in the driver worker.
if not self.is_driver_worker:
return None
# Sample the next token.
output = self.model.sample(
logits=logits,
sampling_metadata=sampling_metadata,
)
这里,hidden_states尺寸为 [26,768], logits的维度为[4, 50272], output对应的数据类型为vllm.sequence.SamplerOutput. 这里4对应的就是输入数量,26对应的是输入的总token数。
request_outputs = self._process_model_outputs(
output, scheduler_outputs.scheduled_seq_groups,
scheduler_outputs.ignored_seq_groups, seq_group_metadata_list)
# Log stats.
self.do_log_stats(scheduler_outputs, output)
在经过N次迭代后,我们完成了最终推理
(Pdb) p request_outputs
[RequestOutput(request_id=0, prompt='Hello, my name is', prompt_token_ids=[2, 31414, 6, 127, 766, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' Joel', token_ids=[8966], cumulative_logprob=-7.448906421661377, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3341556, last_token_time=1721867331.3341556, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9262118339538574, finished_time=None), lora_request=None), RequestOutput(request_id=1, prompt='The president of the United States is', prompt_token_ids=[2, 133, 394, 9, 5, 315, 532, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' speaking', token_ids=[2686], cumulative_logprob=-5.128592491149902, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3390863, last_token_time=1721867331.3390863, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.921281099319458, finished_time=None), lora_request=None), RequestOutput(request_id=2, prompt='The capital of France is', prompt_token_ids=[2, 133, 812, 9, 1470, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' a', token_ids=[10], cumulative_logprob=-1.876983642578125, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, metrics=RequestMetrics(arrival_time=1721867331.3396564, last_token_time=1721867331.3396564, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9207110404968262, finished_time=None), lora_request=None), RequestOutput(request_id=3, prompt='The future of AI is', prompt_token_ids=[2, 133, 499, 9, 4687, 16], prompt_logprobs=None, outputs=[CompletionOutput(index=0, text=' facing', token_ids=[2114], cumulative_logprob=-7.601373195648193, logprobs=None, finish_reason=None, stop_reason=None)], finished=False, 
metrics=RequestMetrics(arrival_time=1721867331.340093, last_token_time=1721867331.340093, first_scheduled_time=1721867333.2603674, first_token_time=1721868233.8055446, time_in_queue=1.9202744960784912, finished_time=None), lora_request=None)]
(Pdb)
回顾之前的流程,我觉得几个值得观察的地方。
1. execute_model第一次被调用的地方
第一次execute_model()函数的调用, 是在llm engine的构造时,有一次warm up,会模拟推理,按照最大batch数走一遍流程。
调用的堆栈信息如下
2. slot mapping在prefill和decode中不同状态
slot mapping表示的是每个kv对对应的物理id,一个token对应的一个slot。
对于prefill data,还会记录当前处理的最大prefill长度(max_query_len),不太清楚具体用意。
对于序列的第一次处理是prefill,之后就进入decode阶段,其中:
decode阶段,每次每个sequence占用一个slot mapping,batch为4时,就占用四个,并且每次迭代后,slot mapping的index增加1,此外,可以看到每个序列对应一个block_table,尺寸为[128],max_decode_seq_length也在一直增加。
当max_decode_seq_length增加到17时,第二个block table占据了2个元素。对应的slot_mapping在此前一直每次+1,而这一次从542303变成了542240。额,542304 - 16 - 542240 = 48