MiniMax API Examples
本页面提供Agentsflare MiniMax API的使用示例,帮助您快速集成和使用MiniMax AI服务。
基础配置
在开始使用API之前,请确保您已经获取了API Key。如果还没有,请参考创建API Key。
基础信息
- API Base URL: https://api.agentsflare.com/anthropic/v1/messages
- 认证方式: Bearer Token
- 内容类型: application/json
请求示例
bash
# NOTE: the Authorization header must use double quotes so the shell expands
# $API_KEY; with single quotes the literal string "$API_KEY" is sent and the
# request fails authentication. Export it first: export API_KEY=sk-...
curl --location --request POST 'https://api.agentsflare.com/anthropic/v1/messages' \
--header 'Content-Type: application/json' \
--header "Authorization: Bearer $API_KEY" \
--data-raw '{
    "messages": [
        {
            "role": "user",
            "content": "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"
        }
    ],
    "max_tokens": 8920,
    "model": "MiniMax-M2.5",
    "stream": true
}'
import anthropic

# Client pointed at the Agentsflare Anthropic-compatible endpoint.
client = anthropic.Anthropic(
    api_key="YOUR_API_KEY",  # prefer loading this from an environment variable
    base_url="https://api.agentsflare.com/anthropic"
)

# Streaming request: events arrive incrementally over SSE.
message = client.messages.create(
    model="MiniMax-M2.5",
    max_tokens=8920,
    messages=[
        {
            "role": "user",
            "content": "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"
        }
    ],
    stream=True
)

for event in message:
    if event.type == "content_block_delta":
        delta = event.delta
        if delta.type == "text_delta":
            # Regular text output.
            print(delta.text, end="", flush=True)
        elif delta.type == "thinking_delta":
            # Thinking trace (optional: print it or ignore it).
            print(delta.thinking, end="", flush=True)
import Anthropic from "@anthropic-ai/sdk";

// Client for the Agentsflare Anthropic-compatible gateway.
const client = new Anthropic({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/anthropic"
});

/**
 * Streams a MiniMax-M2.5 completion and prints text deltas as they arrive.
 * Fix: only `text_delta` deltas carry `.text`; thinking deltas carry
 * `.thinking`, so writing `event.delta.text` unconditionally would emit
 * "undefined" (or throw) whenever the model streams a thinking trace.
 */
async function main() {
  try {
    const stream = await client.messages.create({
      model: "MiniMax-M2.5",
      max_tokens: 8920,
      messages: [
        {
          role: "user",
          content: "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"
        }
      ],
      stream: true
    });
    for await (const event of stream) {
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        process.stdout.write(event.delta.text);
      }
    }
  } catch (err) {
    // Surface the API error payload when available, otherwise the raw error.
    console.error(err?.response?.data ?? err);
  }
}
main();
const Anthropic = require("@anthropic-ai/sdk");

// Client for the Agentsflare Anthropic-compatible gateway (CommonJS variant).
const client = new Anthropic({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/anthropic"
});

/**
 * Streams a MiniMax-M2.5 completion and prints text deltas as they arrive.
 * Fix: only `text_delta` deltas carry `.text`; thinking deltas carry
 * `.thinking`, so writing `event.delta.text` unconditionally would emit
 * "undefined" (or throw) whenever the model streams a thinking trace.
 */
async function main() {
  try {
    const stream = await client.messages.create({
      model: "MiniMax-M2.5",
      max_tokens: 8920,
      messages: [
        {
          role: "user",
          content: "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"
        }
      ],
      stream: true
    });
    for await (const event of stream) {
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        process.stdout.write(event.delta.text);
      }
    }
  } catch (err) {
    // Surface the API error payload when available, otherwise the raw error.
    console.error(err?.response?.data ?? err);
  }
}
main();
import anthropic

# Client pointed at the Agentsflare Anthropic-compatible endpoint.
client = anthropic.Anthropic(
    api_key="YOUR_API_KEY",  # prefer loading this from an environment variable
    base_url="https://api.agentsflare.com/anthropic"
)

# Non-streaming request: the complete reply is returned in a single response.
message = client.messages.create(
    model="MiniMax-M2.5",
    max_tokens=8920,
    messages=[
        {
            "role": "user",
            "content": "你好,请介绍一下你自己"
        }
    ]
)

# `content` is a list of blocks; the first block holds the generated text.
print(message.content[0].text)
// Example: streaming a MiniMax-M2.5 completion through the Agentsflare
// Anthropic-compatible gateway using the official Go SDK.
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
)
func main() {
// Read the API key from the environment; never hard-code credentials.
apiKey := os.Getenv("AGENTSFLARE_API_KEY")
if apiKey == "" {
log.Fatal("missing env AGENTSFLARE_API_KEY")
}
client := anthropic.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://api.agentsflare.com/anthropic"),
)
ctx := context.Background()
// NewStreaming opens an SSE stream; events are pulled with stream.Next().
stream := client.Messages.NewStreaming(ctx, anthropic.MessageNewParams{
Model: anthropic.F("MiniMax-M2.5"),
MaxTokens: anthropic.F(int64(8920)),
Messages: anthropic.F([]anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("你好,请介绍一下你自己")),
}),
})
for stream.Next() {
event := stream.Current()
// Only text deltas are printed; other delta kinds are skipped.
// NOTE(review): the type assertion below follows the SDK's event model —
// confirm it matches the installed anthropic-sdk-go version.
if delta, ok := event.Delta.(anthropic.ContentBlockDeltaEventDelta); ok {
if textDelta, ok := delta.AsTextDelta(); ok {
fmt.Print(textDelta.Text)
}
}
}
// Err reports any error that terminated the stream early.
if err := stream.Err(); err != nil {
log.Fatalf("stream error: %v", err)
}
}
响应示例
流式响应
json
event: message_start
data: {"type":"message_start","message":{"id":"msg_123","type":"message","role":"assistant","content":[],"model":"MiniMax-M2.5","stop_reason":null,"usage":{"input_tokens":45,"output_tokens":0}}}
event: content_block_start
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"老三"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"叫小明"}}
event: content_block_stop
data: {"type":"content_block_stop","index":0}
event: message_delta
data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":15}}
event: message_stop
data: {"type":"message_stop"}
非流式响应
json
{
"id": "msg_123456",
"type": "message",
"role": "assistant",
"content": [
{
"type": "text",
"text": "老三叫小明。因为题目问的是“小明的爸爸”,所以小明就是第三个儿子。"
}
],
"model": "MiniMax-M2.5",
"stop_reason": "end_turn",
"usage": {
"input_tokens": 45,
"output_tokens": 28
}
}
请求参数
| 参数 | 类型 | 必填 | 说明 |
|---|---|---|---|
| model | string | 是 | 模型名称,如 MiniMax-M2.5 |
| messages | array | 是 | 消息数组,包含 role 和 content |
| max_tokens | integer | 是 | 最大生成token数,建议设置为8920 |
| stream | boolean | 否 | 是否启用流式响应,默认为false |
| temperature | float | 否 | 采样温度,范围0-1 |
| top_p | float | 否 | 核采样参数 |
特性说明
流式输出
MiniMax API 支持流式输出(SSE),通过设置 stream: true 启用。流式响应可以实时获取生成内容,提供更好的用户体验。
Token 限制
- 建议 max_tokens 设置为 8920 或更小
- 实际可用的 token 数量取决于输入长度
模型特点
MiniMax-M2.5 是一个强大的中文语言模型,特别适合:
- 中文对话和问答
- 逻辑推理
- 内容创作
- 知识问答
注意事项
- API Key 安全:请勿在代码中硬编码 API Key,建议使用环境变量
- 请求频率:请遵守 API 调用频率限制
- 错误处理:建议实现完善的错误处理机制
- 流式响应:使用流式响应时注意正确处理各种事件类型