Skip to content

Chat GPT API Examples

This page provides examples of using the Agentsflare Chat GPT API to help you quickly integrate and use our AI services.

Basic Configuration

Before starting to use the API, please ensure you have obtained an API Key. If not, please refer to Create API Key.

Basic Information

  • API Base URL: https://api.agentsflare.com/v1/chat/completions
  • Authentication Method: Bearer Token
  • Content Type: application/json

Request Examples

bash
# Single chat completion request against the Agentsflare gateway.
# Replace YOUR_API_KEY with the key created in the console.
curl -X POST "https://api.agentsflare.com/v1/chat/completions" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4",
    "messages": [
      {
        "role": "user",
        "content": "Hello, how are you?"
      }
    ],
    "max_tokens": 100,
    "temperature": 0.7
  }'
python
from openai import OpenAI

# Base URL pointing the official OpenAI SDK at the Agentsflare gateway.
url = "https://api.agentsflare.com/v1"

client = OpenAI(
    base_url=url,
    api_key="YOUR_API_KEY"  # Replace with your Agentsflare API key
)

# Same request as the cURL and JavaScript examples:
# one user message, capped at 100 tokens.
completion = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "Hello, how are you?"}
    ],
    max_tokens=100,
    temperature=0.7
)

# Print only the reply text, consistent with the other language examples.
print(completion.choices[0].message.content)
javascript

import OpenAI from "openai";

// Configure the official OpenAI SDK to talk to the Agentsflare gateway.
const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

async function main() {
  try {
    const completion = await client.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });

    // Log just the assistant's reply text...
    console.log(completion.choices?.[0]?.message?.content);
    // ...or uncomment to inspect the entire payload:
    // console.log(completion);
  } catch (err) {
    // SDK errors often carry the server's response body with details.
    console.error(err?.response?.data ?? err);
  }
}

main();
java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletionCreateParams;
import com.openai.models.chat.completions.ChatCompletion;

public class Main {
  public static void main(String[] args) {
    // Read the key from the environment rather than hard-coding it.
    String apiKey = System.getenv("AGENTSFLARE_API_KEY"); 
    if (apiKey == null || apiKey.isBlank()) {
      throw new IllegalStateException("Missing AGENTSFLARE_API_KEY env var");
    }

    // Point the official OpenAI Java SDK at the Agentsflare gateway.
    OpenAIClient client = OpenAIOkHttpClient.builder()
        .apiKey(apiKey)
        .baseUrl("https://api.agentsflare.com/v1")
        .build();

    // Same request as the cURL example: one user message,
    // reply capped at 100 tokens, temperature 0.7.
    // NOTE(review): builder/message API shape varies between SDK
    // versions — verify against the openai-java version you depend on.
    ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
        .model("gpt-4")
        .addMessage(ChatCompletionCreateParams.Message.builder()
            .role(ChatCompletionCreateParams.Message.Role.USER)
            .content("Hello, how are you?")
            .build())
        .maxTokens(100)
        .temperature(0.7)
        .build();

    // Blocking call; throws on transport or API errors.
    ChatCompletion res = client.chat().completions().create(params);

    // Print only the assistant's reply text (first choice).
    String content = res.choices().get(0).message().content();
    System.out.println(content);
  }
}
go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	openai "github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
)

func main() {
	apiKey := os.Getenv("AGENTSFLARE_API_KEY") // Recommended to use environment variables
	if apiKey == "" {
		log.Fatal("missing env AGENTSFLARE_API_KEY")
	}

	client := openai.NewClient(
		option.WithAPIKey(apiKey),
		// Key: point the SDK's base url to agentsflare
		option.WithBaseURL("https://api.agentsflare.com/v1"),
	)

	ctx := context.Background()

	// Same request as the cURL example: one user message, max 100 tokens.
	// NOTE(review): the openai.F wrapper style is version-specific —
	// confirm against the openai-go release you depend on.
	resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Model: openai.F("gpt-4"),
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Hello, how are you?"),
		}),
		MaxTokens:   openai.F(int64(100)),
		Temperature: openai.F(0.7),
	})
	if err != nil {
		log.Fatalf("chat completion failed: %v", err)
	}

	// Print the reply text
	if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != "" {
		fmt.Println(resp.Choices[0].Message.Content)
	} else {
		fmt.Printf("empty response: %+v\n", resp)
	}
}
javascript
const { OpenAI } = require("openai");

// Point the official SDK at the Agentsflare-compatible endpoint.
const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

// Async IIFE so we can use await in a CommonJS script.
(async () => {
  try {
    const response = await client.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });

    // Log only the reply text...
    console.log(response.choices?.[0]?.message?.content);
    // ...or uncomment to dump the whole response:
    // console.log(response);
  } catch (err) {
    // SDK errors often include the server's response body with details.
    console.error(err?.response?.data ?? err);
  }
})();

Response Example

{
    "choices": [
        {
            "content_filter_results": {
                "hate": {
                    "filtered": false,
                    "severity": "safe"
                },
                "protected_material_code": {
                    "filtered": false,
                    "detected": false
                },
                "protected_material_text": {
                    "filtered": false,
                    "detected": false
                },
                "self_harm": {
                    "filtered": false,
                    "severity": "safe"
                },
                "sexual": {
                    "filtered": false,
                    "severity": "safe"
                },
                "violence": {
                    "filtered": false,
                    "severity": "safe"
                }
            },
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
                "annotations": [

                ],
                "content": "Hello! How can I help you? 😊",
                "refusal": null,
                "role": "assistant"
            }
        }
    ],
    "created": 1767765293,
    "id": "chatcmpl-CvGnN6pDPthK2Aw5pToFy1K098dhV",
    "model": "gpt-4.1-mini-2025-04-14",
    "object": "chat.completion",
    "prompt_filter_results": [
        {
            "prompt_index": 0,
            "content_filter_results": {
                "hate": {
                    "filtered": false,
                    "severity": "safe"
                },
                "jailbreak": {
                    "filtered": false,
                    "detected": false
                },
                "self_harm": {
                    "filtered": false,
                    "severity": "safe"
                },
                "sexual": {
                    "filtered": false,
                    "severity": "safe"
                },
                "violence": {
                    "filtered": false,
                    "severity": "safe"
                }
            }
        }
    ],
    "system_fingerprint": "fp_3dcd5944f5",
    "usage": {
        "completion_tokens": 11,
        "completion_tokens_details": {
            "accepted_prediction_tokens": 0,
            "audio_tokens": 0,
            "reasoning_tokens": 0,
            "rejected_prediction_tokens": 0
        },
        "prompt_tokens": 10,
        "prompt_tokens_details": {
            "audio_tokens": 0,
            "cached_tokens": 0
        },
        "total_tokens": 21
    }
}

Request Parameters

For the full list of supported request parameters, see the Chat Completions API reference.

Batch Request Examples

The examples below demonstrate submitting multiple chat requests in a single batch. Actual endpoints and parameters may vary by SDK/version; these are reference examples using POST https://api.agentsflare.com/v1/chat/batch.

cURL (synchronous example)

bash
# Submit two chat requests in a single batch call.
# Replace YOUR_API_KEY with the key created in the console.
curl -X POST "https://api.agentsflare.com/v1/chat/batch" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "requests": [
      {
        "model": "gpt-4.1",
        "messages": [{"role": "user", "content": "Translate to English: 你好"}],
        "max_tokens": 100
      },
      {
        "model": "gpt-5",
        "messages": [{"role": "user", "content": "Summarize the following text: ..."}],
        "max_tokens": 200
      }
    ]
  }'

Python (requests)

python
import requests, os

# Batch endpoint: submits several chat requests in one HTTP call.
url = "https://api.agentsflare.com/v1/chat/batch"
api_key = os.getenv("AGENTSFLARE_API_KEY")

payload = {
  "requests": [
    {"model": "gpt-4.1", "messages": [{"role":"user","content":"Translate to English: 你好"}], "max_tokens": 100},
    {"model": "gpt-5", "messages": [{"role":"user","content":"Extract key points from the text: ..."}], "max_tokens": 200}
  ]
}

resp = requests.post(url, headers={
    "Authorization": f"Bearer {api_key}",
    "Content-Type": "application/json"
}, json=payload, timeout=60)  # requests has no default timeout; avoid hanging forever

# Fail loudly on HTTP errors instead of trying to parse an error body as results.
resp.raise_for_status()

data = resp.json()
for i, item in enumerate(data.get("results", [])):
    print(f"request {i} ->", item.get("choices", [{}])[0].get("message", {}).get("content"))

JavaScript (fetch)

javascript
// Submit two chat requests in a single batch call (top-level await:
// run inside an ES module or an async context).
const resp = await fetch("https://api.agentsflare.com/v1/chat/batch", {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${process.env.AGENTSFLARE_API_KEY}`,
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
    requests: [
      { model: "gpt-4.1", messages: [{ role: "user", content: "Translate to English: 你好" }], max_tokens: 100 },
      { model: "gpt-5", messages: [{ role: "user", content: "Summarize this text: ..." }], max_tokens: 200 }
    ]
  })
});

// fetch() only rejects on network failures — HTTP error statuses
// (401, 429, 5xx, ...) must be checked explicitly.
if (!resp.ok) {
  throw new Error(`Batch request failed: ${resp.status} ${resp.statusText}`);
}

const data = await resp.json();
console.log(data.results);

Note: Some SDKs expose higher-level batch features (asynchronous jobs, job IDs, webhook callbacks, etc.). Refer to the platform SDK documentation for supported patterns.

Channel Notes (Batch Support)

  • Standard Channel: Cannot use OpenAI official endpoints (openai.com SDK). Use the platform Batch API. Supported batch models: gpt-4.1, gpt-5, gpt-5.1.
  • Advanced Channel: Same restriction as Standard (cannot use OpenAI official endpoints). Supported batch models: gpt-4.1, gpt-5, gpt-5.1.
  • Dedicated Channel: Can use OpenAI official endpoints and SDKs. Supports all GPT-family models the platform allows for batch (including gpt-4.1, gpt-5, gpt-5.1 and future batch-capable models).

To confirm whether a specific model or your account/channel supports batch, check your account/channel permissions in the console or contact platform support.

This documentation is licensed under CC BY-SA 4.0.