server (start the cluster: one head node, the rest workers)
./run_cluster.sh 698668586297dd30428a9f3bbcadb8a2034732ed13fdcd49218c8c3c9eb1cbd6 ip_address --head /home/prometheus/module-test/llama-factory/LLM-Model/Qwen2.5-Coder-7B-Instruct
./run_cluster.sh 698668586297dd30428a9f3bbcadb8a2034732ed13fdcd49218c8c3c9eb1cbd6 ip_address --worker /home/prometheus/module-test/llama-factory/LLM-Model/Qwen2.5-Coder-7B-Instruct
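run_cluster.sh takes the Docker image (referenced here by its image ID), the IP address of the head node, the node role (--head or --worker), and a host path that is mounted into the container as the Hugging Face cache; replace ip_address with the head node's actual IP.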
container
docker exec -it node /bin/bash
vllm serve /root/.cache/huggingface/ --tensor-parallel-size 1 --pipeline-parallel-size 1
use the model from inside the container
curl http://localhost:8000/v1/models
{
  "object": "list",
  "data": [
    {
      "id": "/root/.cache/huggingface/",
      "object": "model",
      "created": 1733322900,
      "owned_by": "vllm",
      "root": "/root/.cache/huggingface/",
      "parent": null,
      "max_model_len": 32768,
      "permission": [
        {
          "id": "modelperm-19491f4adebc4075b2871384cf1c3a11",
          "object": "model_permission",
          "created": 1733322900,
          "allow_create_engine": false,
          "allow_sampling": true,
          "allow_logprobs": true,
          "allow_search_indices": false,
          "allow_view": true,
          "allow_fine_tuning": false,
          "organization": "*",
          "group": null,
          "is_blocking": false
        }
      ]
    }
  ]
}
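The same query can be issued from Python with the OpenAI client; a minimal sketch, assuming the server above is reachable on localhost:8000 and was started without --api-key (so a placeholder key such as "EMPTY" is enough):

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# List the models the server exposes; here the id is the local HF cache path.
for model in client.models.list().data:
    print(model.id)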
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "/root/.cache/huggingface/",
"prompt": "San Francisco is a",
"max_tokens": 7,
"temperature": 0
}'
{
  "id": "cmpl-c3de13b28c57437f8de33169dc211516",
  "object": "text_completion",
  "created": 1733323053,
  "model": "/root/.cache/huggingface/",
  "choices": [
    {
      "index": 0,
      "text": " city in California, United States.",
      "logprobs": null,
      "finish_reason": "length",
      "stop_reason": null,
      "prompt_logprobs": null
    }
  ],
  "usage": {
    "prompt_tokens": 4,
    "total_tokens": 11,
    "completion_tokens": 7,
    "prompt_tokens_details": null
  }
}
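The same completion request from Python; again a sketch that assumes the server above and a placeholder API key:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.completions.create(
    model="/root/.cache/huggingface/",   # model id as returned by /v1/models
    prompt="San Francisco is a",
    max_tokens=7,
    temperature=0,
)
print(completion.choices[0].text)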
OpenAI Compatible Server
vllm serve NousResearch/Meta-Llama-3-8B-Instruct --dtype auto --api-key token-abc123
how to call the server
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="token-abc123",
)

completion = client.chat.completions.create(
    model="NousResearch/Meta-Llama-3-8B-Instruct",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(completion.choices[0].message)
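The api_key passed to the client must match the --api-key value the server was started with (token-abc123 above).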
--served-model-name
The model name(s) used in the API.
If multiple names are provided, the server will respond to any of the provided names.
The model name in the model field of a response will be the first name in this list.
If not specified, the model name will be the same as the --model argument.
Note that these names are also used in the model_name tag of Prometheus metrics;
if multiple names are provided, the metrics tag takes the first one.
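For example (hypothetical alias; assumes the server was launched with --served-model-name qwen2.5-coder-7b-instruct), clients can then pass the alias instead of the path:

# Hypothetical: server started with
#   vllm serve /root/.cache/huggingface/ --served-model-name qwen2.5-coder-7b-instruct
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

completion = client.completions.create(
    model="qwen2.5-coder-7b-instruct",   # alias set via --served-model-name
    prompt="San Francisco is a",
    max_tokens=7,
)
print(completion.choices[0].text)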
--max-num-batched-tokens 5120
Maximum number of batched tokens per iteration.
decrease GPU memory usage
--gpu-memory-utilization 0.8 --max-model-len 4096 --enable-chunked-prefill=False
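The same knobs are available on the offline vllm.LLM API; a minimal sketch, assuming the local model path from above and illustrative values:

from vllm import LLM

llm = LLM(
    model="/root/.cache/huggingface/",
    gpu_memory_utilization=0.8,   # cap the fraction of GPU memory vLLM may use
    max_model_len=4096,           # shorter context -> smaller KV cache
    enable_chunked_prefill=False,
)
print(llm.generate("San Francisco is a")[0].outputs[0].text)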