In my day-to-day work I frequently need to run VQA inference with GPT-4o, especially few-shot inference.
Below is a demo inference script.
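The script reads one JSON object per line from input.jsonl. For reference, each line is expected to look like the following (the field names come from the script below; the values here are purely illustrative):

{"question_id": "0001", "image": "images/scene_0001.jpg", "question": "Please describe the object inside the red rectangle in the image and explain why it affects ego car driving."}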
import base64
import json
from openai import OpenAI
# Initialize OpenAI client
client = OpenAI(api_key="yourkey", base_url="https://api.openai.com/v1")  # base_url can be omitted when using the official endpoint
# Function to encode the image to base64
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
# Input and output file paths
input_file = 'input.jsonl'
output_file = 'output.jsonl'
# Few-shot examples for image-text interaction
few_shot_examples = [
    {
        "prompt": "Please describe the object inside the red rectangle in the image and explain why it affects ego car driving.",
        "image_path": "",  # path to the example image (left empty here; fill in before running)
        "answer": "This object is a traffic sign with directional arrows and supplementary plates. The sign shows three arrows indicating lane directions: the left arrow directs traffic to turn left, the central arrow indicates that the lane goes straight ahead, and the right arrow signifies a lane for turning right. The plates below the arrows display speed limits and vehicle classification restrictions. The presence of this sign guides the ego car to choose the correct lane based on its intended route. If the ego car intends to proceed straight, it should align with the central arrow. The speed limit and vehicle classification signs instruct the driver to adhere to the indicated speed limit and lane usage based on the type of vehicle they are operating."
    }
]
# Convert each few-shot example into the chat message format:
# a user turn (prompt + image) followed by an assistant turn (reference answer)
few_shot_prompts = []
for ex in few_shot_examples:
    few_shot_prompts.append({
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": ex["prompt"]
            },
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{encode_image(ex['image_path'])}",
                    "detail": "high"
                }
            }
        ]
    })
    few_shot_prompts.append({
        "role": "assistant",
        "content": ex["answer"]
    })
# Open input and output files
with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
    for line in infile:
        # Parse JSON data from current line
        data = json.loads(line)
        # Extract information
        question_id = data['question_id']
        image_path = data['image']
        question_text = data['question']
        base64_image = encode_image(image_path)
# Prepare messages for the GPT-4 API request
messages = [
{"role": "system", "content": "You are an autonomous driving expert, specializing in recognizing traffic scenes and making driving decisions."},
{"role": "user",
"content": [
{
"type": "text",
"text": question_text
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
"detail": "high"
}
}
]
}
]
# Incorporate few-shot examples into the messages
messages.extend(few_shot_prompts)
        # Request completion from the GPT-4o API using the few-shot messages
        response = client.chat.completions.create(
            model="gpt-4o-2024-05-13",
            messages=messages,
            stream=False
        )
        # Extract model response from API response
        model_response = response.choices[0].message.content
        # Print model response (for debugging purposes)
        print(f"Question ID: {question_id}\nQuestion: {question_text}\nAnswer: {model_response}\n")
        # Add answer field to data
        data['answer'] = model_response
        # Write updated data back to output JSONL file
        outfile.write(json.dumps(data) + '\n')
print("Processing completed. Answers added to each entry and saved to", output_file)