三步快速对接 Grok-4.1 API
准备工作
系统要求
Python: 3.7+ 或 Node.js 14+
网络: 能够访问 HTTPS 服务
依赖: requests (Python) 或 axios (Node.js)
API 基础信息
Base URL: https://api.aaigc.top
接口端点: /v1/chat/completions
请求方法: POST
内容类型: application/json
第一步:获取 API Key
1.1 登录控制台
访问 AAIGC 控制台
使用您的账号登录
进入 “API 管理” 页面
1.2 创建 API Key
步骤说明
- 点击 “创建 API Key” 按钮
- 输入应用名称(例如:“我的Grok应用”)
- 选择权限范围(建议选择 “标准权限”)
- 点击 “确认创建”
- 复制生成的 API Key(重要:请妥善保管,不要泄露)
1.3 验证 API Key
# Python 验证代码
import requests
API_KEY = "your-api-key"
BASE_URL = "https://api.aaigc.top/v1/chat/completions"

headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json"
}


def test_api_key():
    """Send a minimal chat request to verify that the API key is accepted.

    Returns:
        bool: True when the server answers 200, False on 401 (bad key),
        any other status code, or a network error.
    """
    payload = {
        "model": "grok-4.1",
        "messages": [{"role": "user", "content": "Hello, Grok!"}]
    }
    try:
        # timeout added so the script cannot hang forever on a dead network
        response = requests.post(BASE_URL, headers=headers, json=payload, timeout=30)
        if response.status_code == 200:
            print("API Key 验证成功!")
            return True
        elif response.status_code == 401:
            print("API Key 验证失败:无效的密钥")
            return False
        else:
            print(f"验证失败,状态码:{response.status_code}")
            return False
    except Exception as e:
        # Top-level boundary of a standalone script: report and fail soft.
        print(f"网络错误:{e}")
        return False


if __name__ == "__main__":
    test_api_key()
第二步:编写基础调用代码
2.1 Python 实现
基础同步调用
import requests
import json
class GrokClient:
    """Minimal synchronous client for the Grok chat-completions endpoint."""

    def __init__(self, api_key):
        self.api_key = api_key
        self.base_url = "https://api.aaigc.top/v1/chat/completions"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    def chat(self, messages, model="grok-4.1", temperature=0.7, max_tokens=1024):
        """Send one chat request and return the parsed JSON body.

        Returns None (after printing the error) when the request fails.
        """
        request_body = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        try:
            resp = requests.post(
                self.base_url,
                headers=self.headers,
                json=request_body,
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()
        except requests.exceptions.RequestException as e:
            print(f"API 请求失败: {e}")
            return None
# Usage example
if __name__ == "__main__":
    # Create a client with your own API key.
    grok = GrokClient("your-api-key")

    # A system instruction followed by the user's question.
    conversation = [
        {"role": "system", "content": "你是一个专业的AI助手,回答要简洁明了"},
        {"role": "user", "content": "请介绍一下Grok-4.1的主要特性"},
    ]

    reply = grok.chat(conversation)

    if reply:
        print("Grok 回复:", reply["choices"][0]["message"]["content"])
        print("使用统计:", reply["usage"])
流式调用(实时聊天)
import requests
import json
import sseclient
class GrokStreamClient(GrokClient):
    """Streaming variant of GrokClient built on server-sent events (SSE)."""

    def stream_chat(self, messages, model="grok-4.1", temperature=0.7):
        """Stream a chat completion token-by-token, printing as it arrives.

        Args:
            messages: list of {"role": ..., "content": ...} dicts.
            model: model identifier sent to the API.
            temperature: sampling temperature.

        Returns:
            The full concatenated reply text, or None on request failure.
        """
        payload = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "stream": True
        }
        try:
            response = requests.post(
                self.base_url,
                headers=self.headers,
                json=payload,
                stream=True,
                timeout=60
            )
            # Bug fix: fail fast on HTTP errors instead of feeding an error
            # page to the SSE parser (the original never checked the status).
            response.raise_for_status()
            client = sseclient.SSEClient(response)
            full_response = ""
            print("Grok 正在回复...")
            for event in client.events():
                if event.data == "[DONE]":
                    break
                try:
                    data = json.loads(event.data)
                    delta = data["choices"][0]["delta"]
                    if "content" in delta:
                        content = delta["content"]
                        full_response += content
                        print(content, end="", flush=True)
                except (json.JSONDecodeError, KeyError, IndexError):
                    # Skip keep-alive / malformed events rather than crash
                    # mid-stream.
                    continue
            print("\n")
            return full_response
        except requests.exceptions.RequestException as e:
            print(f"流式请求失败: {e}")
            return None
# Streaming usage example
if __name__ == "__main__":
    streaming_client = GrokStreamClient("your-api-key")
    prompt = [
        {"role": "system", "content": "你是一个故事创作专家"},
        {"role": "user", "content": "请写一个关于AI助手的短篇故事,100字以内"},
    ]
    streaming_client.stream_chat(prompt)
2.2 Node.js 实现
const axios = require('axios');
class GrokClient {
    /**
     * @param {string} apiKey - Bearer token for the AAIGC gateway.
     */
    constructor(apiKey) {
        this.apiKey = apiKey;
        this.baseUrl = 'https://api.aaigc.top/v1/chat/completions';
        this.headers = {
            'Authorization': `Bearer ${this.apiKey}`,
            'Content-Type': 'application/json'
        };
    }

    /**
     * Send a non-streaming chat request.
     * Bug fixes vs. the original:
     *  - `??` (and destructuring defaults) instead of `||`, so explicit
     *    falsy values such as `temperature: 0` are honored;
     *  - the camelCase `maxTokens` option no longer leaks into the JSON
     *    payload via the options spread.
     */
    async chat(messages, options = {}) {
        const { model = 'grok-4.1', temperature = 0.7, maxTokens = 1024, ...extra } = options;
        const payload = {
            model,
            messages,
            temperature,
            max_tokens: maxTokens,
            ...extra
        };
        try {
            const response = await axios.post(this.baseUrl, payload, {
                headers: this.headers,
                timeout: 30000
            });
            return response.data;
        } catch (error) {
            console.error('API Error:', error.response?.data || error.message);
            throw error;
        }
    }

    /**
     * Stream a chat completion; `onChunk` receives each text fragment.
     * Bug fix: SSE lines are buffered across network chunks — the original
     * split on '\n' per chunk, so an event broken across two chunks was
     * silently dropped as a JSON parse error.
     */
    async streamChat(messages, options = {}, onChunk) {
        const { model = 'grok-4.1', temperature = 0.7, maxTokens, ...extra } = options;
        const payload = {
            model,
            messages,
            temperature,
            stream: true,
            ...extra
        };
        const response = await axios.post(this.baseUrl, payload, {
            headers: this.headers,
            responseType: 'stream',
            timeout: 60000
        });
        let fullResponse = '';
        let buffer = '';
        for await (const chunk of response.data) {
            buffer += chunk.toString();
            const lines = buffer.split('\n');
            buffer = lines.pop(); // keep trailing partial line for next chunk
            for (const line of lines) {
                if (!line.startsWith('data: ')) {
                    continue;
                }
                const data = line.substring(6);
                if (data === '[DONE]') {
                    return fullResponse;
                }
                try {
                    const parsed = JSON.parse(data);
                    const delta = parsed.choices[0].delta;
                    if (delta.content) {
                        fullResponse += delta.content;
                        if (onChunk) {
                            onChunk(delta.content);
                        }
                    }
                } catch (e) {
                    console.error('解析错误:', e);
                }
            }
        }
        return fullResponse;
    }
}
// Usage example
async function main() {
    const client = new GrokClient('your-api-key');
    const messages = [
        { role: 'system', content: '你是一个JavaScript专家' },
        { role: 'user', content: '如何优化Node.js应用性能?' }
    ];
    // Echo each streamed fragment straight to stdout.
    const echo = (chunk) => process.stdout.write(chunk);
    try {
        // Plain (non-streaming) request first.
        const result = await client.chat(messages);
        console.log('回复:', result.choices[0].message.content);
        // Then the same prompt, streamed.
        console.log('\n流式回复:');
        await client.streamChat(messages, {}, echo);
    } catch (error) {
        console.error('错误:', error);
    }
}

main();
第三步:优化和部署
3.1 错误处理优化
class GrokAPIError(Exception):
    """Custom exception for Grok API failures.

    Attributes:
        message: human-readable error description.
        error_type: API-reported error category, if any.
        code: API-reported error code, if any.
        status_code: HTTP status code of the failed response, if any.
    """

    def __init__(self, message, error_type=None, code=None, status_code=None):
        super().__init__(message)
        # Bug fix: Python 3 removed Exception.message, but callers read
        # `e.message` directly — store it explicitly.
        self.message = message
        self.error_type = error_type
        self.code = code
        self.status_code = status_code
class AdvancedGrokClient(GrokClient):
    """GrokClient variant that raises structured GrokAPIError exceptions
    instead of printing and returning None."""

    def _handle_error(self, response):
        """Translate a non-2xx HTTP response into a GrokAPIError.

        Raises:
            GrokAPIError: always — populated from the JSON error body when
            available, otherwise from the bare status code.
        """
        try:
            error_data = response.json()
        except ValueError:
            # Body is not JSON (e.g. an HTML error page from a proxy).
            raise GrokAPIError(
                message=f"API请求失败: {response.status_code}",
                status_code=response.status_code
            )
        error = error_data.get('error', {})
        raise GrokAPIError(
            message=error.get('message', 'API请求失败'),
            error_type=error.get('type'),
            code=error.get('code'),
            status_code=response.status_code
        )

    def chat(self, messages, model="grok-4.1", temperature=0.7, max_tokens=1024):
        """Send a chat request and return the parsed JSON body.

        Raises:
            GrokAPIError: on HTTP error responses, timeouts, connection
            failures, or any other request-level error.
        """
        try:
            response = requests.post(
                self.base_url,
                headers=self.headers,
                json={
                    "model": model,
                    "messages": messages,
                    "temperature": temperature,
                    "max_tokens": max_tokens
                },
                timeout=30
            )
            if not response.ok:
                self._handle_error(response)
            return response.json()
        except requests.exceptions.Timeout as e:
            # Chain the original exception so the root cause is preserved.
            raise GrokAPIError("请求超时,请检查网络连接") from e
        except requests.exceptions.ConnectionError as e:
            raise GrokAPIError("连接失败,请检查网络连接") from e
        except requests.exceptions.RequestException as e:
            raise GrokAPIError(f"网络错误: {str(e)}") from e
# Example: using the error-aware client
if __name__ == "__main__":
    client = AdvancedGrokClient("your-api-key")
    try:
        messages = [{"role": "user", "content": "Hello Grok!"}]
        result = client.chat(messages)
        print("成功:", result["choices"][0]["message"]["content"])
    except GrokAPIError as e:
        # Bug fix: `e.message` raises AttributeError on Python 3 exceptions
        # that don't set it; str(e) yields the same text and always works.
        print(f"API错误: {e}")
        if e.status_code == 401:
            print("请检查API Key是否正确")
        elif e.status_code == 429:
            print("请求频率超限,请稍后重试")
3.2 连接池和性能优化
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
class OptimizedGrokClient(AdvancedGrokClient):
    """Client with a pooled, retrying HTTP session for high-throughput use."""

    def __init__(self, api_key):
        super().__init__(api_key)
        self.session = self._create_optimized_session()

    def _create_optimized_session(self):
        """Build a requests.Session with retries and a connection pool."""
        session = requests.Session()
        # Retry transient failures. POST must be listed explicitly: urllib3's
        # default allowed_methods excludes POST, so without this line the
        # status_forcelist below never triggered a retry (bug fix). Note the
        # API call is not guaranteed idempotent; retried statuses are limited
        # to throttling/server errors.
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=frozenset({"POST"}),
        )
        # Connection pool sizing for concurrent callers.
        adapter = HTTPAdapter(
            max_retries=retry_strategy,
            pool_connections=10,
            pool_maxsize=20,
            pool_block=False
        )
        session.mount("https://", adapter)
        session.headers.update(self.headers)
        return session

    def chat(self, messages, model="grok-4.1", temperature=0.7, max_tokens=1024):
        """Send a chat request over the pooled session.

        Raises:
            GrokAPIError: on HTTP error responses or network failures.

        Bug fixes vs. the original: the broad `except Exception` used to
        catch the GrokAPIError raised by _handle_error, re-wrap it (losing
        the status code) and close the session — defeating the pool. Errors
        now propagate with their details and the session stays reusable.
        """
        payload = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens
        }
        try:
            response = self.session.post(
                self.base_url,
                json=payload,
                timeout=30
            )
        except requests.exceptions.RequestException as e:
            raise GrokAPIError(f"会话错误: {str(e)}") from e
        if not response.ok:
            self._handle_error(response)
        return response.json()
3.3 缓存机制
import hashlib
from datetime import datetime, timedelta
class CachedGrokClient(OptimizedGrokClient):
    """Client with an in-memory TTL cache keyed on the full request."""

    def __init__(self, api_key, cache_max_size=500, cache_ttl=3600):
        """
        Args:
            api_key: API bearer token.
            cache_max_size: maximum number of cached responses.
            cache_ttl: entry lifetime in seconds.
        """
        super().__init__(api_key)
        self.cache = {}
        self.cache_max_size = cache_max_size
        self.cache_ttl = timedelta(seconds=cache_ttl)

    def _generate_cache_key(self, messages, model, temperature, max_tokens=None):
        """Derive a deterministic cache key from every request parameter.

        Bug fix: max_tokens is now part of the key — the original omitted
        it, so requests differing only in max_tokens shared one cache slot.
        Message dicts are canonicalized (sorted items) so key order inside
        a message cannot change the digest.
        """
        canonical = (
            model,
            temperature,
            max_tokens,
            tuple(tuple(sorted(m.items())) for m in messages),
        )
        # md5 is fine here: the digest is a cache key, not a security token.
        return hashlib.md5(repr(canonical).encode()).hexdigest()

    def chat(self, messages, model="grok-4.1", temperature=0.7, max_tokens=1024, use_cache=True):
        """Answer from the cache when possible; otherwise call the API and
        store the result."""
        cache_key = None
        if use_cache:
            cache_key = self._generate_cache_key(messages, model, temperature, max_tokens)
            entry = self.cache.get(cache_key)
            if entry and datetime.now() - entry["timestamp"] < self.cache_ttl:
                return entry["data"]
        # Cache miss (or caching disabled): hit the API.
        result = super().chat(messages, model, temperature, max_tokens)
        if use_cache and result:
            # Evict the oldest entry when full (insertion-age eviction).
            if len(self.cache) >= self.cache_max_size:
                oldest_key = min(self.cache, key=lambda k: self.cache[k]["timestamp"])
                del self.cache[oldest_key]
            self.cache[cache_key] = {
                "data": result,
                "timestamp": datetime.now()
            }
        return result
355

被折叠的 条评论
为什么被折叠?



