MiniMax API Examples
This page provides examples of using the Agentsflare MiniMax API to help you quickly integrate and use MiniMax AI services.
Basic Configuration
Before starting to use the API, please ensure you have obtained an API Key. If not, please refer to Create API Key.
Basic Information
- API Base URL:
https://api.agentsflare.com/anthropic/v1/messages
- Authentication Method: Bearer Token
- Content Type:
application/json
Request Examples
bash
# NOTE: the Authorization header uses double quotes so the shell actually
# expands $API_KEY. With single quotes the literal string "$API_KEY" would
# be sent and the request would fail authentication.
curl --location --request POST 'https://api.agentsflare.com/anthropic/v1/messages' \
  --header 'Content-Type: application/json' \
  --header "Authorization: Bearer $API_KEY" \
  --data-raw '{
    "messages": [
      {
        "role": "user",
        "content": "Xiao Ming'\''s father has three sons. The eldest is called Da Mao, the second is called Er Mao, what is the third one called? Please reason in detail but tell me the answer concisely."
      }
    ],
    "max_tokens": 8920,
    "model": "MiniMax-M2.5",
    "stream": true
}'
python
import anthropic

# Client pointed at the Agentsflare Anthropic-compatible endpoint.
# Prefer loading the key from an environment variable instead of hardcoding it.
client = anthropic.Anthropic(
    api_key="YOUR_API_KEY",
    base_url="https://api.agentsflare.com/anthropic"
)

# stream=True makes create() return an iterable of server-sent events
# instead of a single completed Message object.
message = client.messages.create(
    model="MiniMax-M2.5",
    max_tokens=8920,
    messages=[
        {
            "role": "user",
            "content": "Hello, please introduce yourself"
        }
    ],
    stream=True
)

# content_block_delta events carry either normal output text or the model's
# thinking trace; dispatch on the delta type.
for event in message:
    if event.type == "content_block_delta":
        delta = event.delta
        if delta.type == "text_delta":
            # Normal text output
            print(delta.text, end="", flush=True)
        elif delta.type == "thinking_delta":
            # Thinking process (optional to print or ignore)
            print(delta.thinking, end="", flush=True)
javascript
import Anthropic from "@anthropic-ai/sdk";

// Client configured for the Agentsflare Anthropic-compatible endpoint.
// The API key is read from the environment — never hardcode it.
const client = new Anthropic({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/anthropic"
});

async function main() {
  try {
    const stream = await client.messages.create({
      model: "MiniMax-M2.5",
      max_tokens: 8920,
      messages: [
        {
          role: "user",
          content: "Hello, please introduce yourself"
        }
      ],
      stream: true
    });
    for await (const event of stream) {
      // content_block_delta events may carry a text_delta or a
      // thinking_delta; guard on the delta type so we never pass
      // `undefined` to process.stdout.write (which throws).
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        process.stdout.write(event.delta.text);
      }
    }
  } catch (err) {
    // Surface the API error payload when present, otherwise the raw error.
    console.error(err?.response?.data ?? err);
  }
}
main();
javascript
const Anthropic = require("@anthropic-ai/sdk");

// CommonJS variant of the streaming example. The API key is read from the
// environment — never hardcode it.
const client = new Anthropic({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/anthropic"
});

async function main() {
  try {
    const stream = await client.messages.create({
      model: "MiniMax-M2.5",
      max_tokens: 8920,
      messages: [
        {
          role: "user",
          content: "Hello, please introduce yourself"
        }
      ],
      stream: true
    });
    for await (const event of stream) {
      // Guard on the delta type: thinking_delta events have no `text`
      // field, and writing `undefined` to stdout throws a TypeError.
      if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
        process.stdout.write(event.delta.text);
      }
    }
  } catch (err) {
    // Surface the API error payload when present, otherwise the raw error.
    console.error(err?.response?.data ?? err);
  }
}
main();
python
import anthropic

# Non-streaming request: the call blocks until the full reply is available.
client = anthropic.Anthropic(
    api_key="YOUR_API_KEY",  # prefer an environment variable in real code
    base_url="https://api.agentsflare.com/anthropic"
)
message = client.messages.create(
    model="MiniMax-M2.5",
    max_tokens=8920,
    messages=[
        {
            "role": "user",
            "content": "Hello, please introduce yourself"
        }
    ]
)
# message.content is a list of content blocks; the first block holds the text.
print(message.content[0].text)
go
// Streaming example using the Anthropic Go SDK pointed at the Agentsflare
// endpoint. NOTE(review): the original indentation appears to have been
// stripped during extraction; statements are unchanged here.
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
)
func main() {
// Fail fast if the key is missing rather than sending an
// unauthenticated request.
apiKey := os.Getenv("AGENTSFLARE_API_KEY")
if apiKey == "" {
log.Fatal("missing env AGENTSFLARE_API_KEY")
}
client := anthropic.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://api.agentsflare.com/anthropic"),
)
ctx := context.Background()
// NewStreaming issues the request with streaming semantics; events are
// pulled one at a time via stream.Next()/stream.Current().
stream := client.Messages.NewStreaming(ctx, anthropic.MessageNewParams{
Model: anthropic.F("MiniMax-M2.5"),
MaxTokens: anthropic.F(int64(8920)),
Messages: anthropic.F([]anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello, please introduce yourself")),
}),
})
for stream.Next() {
event := stream.Current()
// Only text deltas are printed; other delta kinds (e.g. thinking)
// are ignored by this example.
if delta, ok := event.Delta.(anthropic.ContentBlockDeltaEventDelta); ok {
if textDelta, ok := delta.AsTextDelta(); ok {
fmt.Print(textDelta.Text)
}
}
}
// Err() surfaces any transport or decoding error after the stream ends.
if err := stream.Err(); err != nil {
log.Fatalf("stream error: %v", err)
}
}
}
Response Examples
Streaming Response
json
event: message_start
data: {"type":"message_start","message":{"id":"msg_123","type":"message","role":"assistant","content":[],"model":"MiniMax-M2.5","stop_reason":null,"usage":{"input_tokens":45,"output_tokens":0}}}
event: content_block_start
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"The third"}}
event: content_block_delta
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" son is Xiao Ming"}}
event: content_block_stop
data: {"type":"content_block_stop","index":0}
event: message_delta
data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":15}}
event: message_stop
data: {"type":"message_stop"}
Non-streaming Response
json
{
"id": "msg_123456",
"type": "message",
"role": "assistant",
"content": [
{
"type": "text",
"text": "The third son is called Xiao Ming. Because the question asks about 'Xiao Ming's father', Xiao Ming is the third son."
}
],
"model": "MiniMax-M2.5",
"stop_reason": "end_turn",
"usage": {
"input_tokens": 45,
"output_tokens": 28
}
}
Request Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| model | string | Yes | Model name, e.g., MiniMax-M2.5 |
| messages | array | Yes | Array of messages with role and content |
| max_tokens | integer | Yes | Maximum tokens to generate, recommended 8920 |
| stream | boolean | No | Enable streaming response, default false |
| temperature | float | No | Sampling temperature, range 0-1 |
| top_p | float | No | Nucleus sampling parameter |
Feature Description
Streaming Output
MiniMax API supports streaming output (SSE) by setting stream: true. Streaming responses allow real-time content generation, providing a better user experience.
Token Limits
- Recommended max_tokens setting is 8920 or less
- Actual available tokens depend on input length
Model Characteristics
MiniMax-M2.5 is a powerful Chinese language model, particularly suitable for:
- Chinese dialogue and Q&A
- Logical reasoning
- Content creation
- Knowledge Q&A
Important Notes
- API Key Security: Do not hardcode API Keys in your code, use environment variables
- Request Rate: Please comply with API call rate limits
- Error Handling: Implement comprehensive error handling mechanisms
- Streaming Response: When using streaming responses, properly handle various event types