
Gemini API Examples

Usage examples for accessing Gemini models through the OpenAI SDK.

Basic Configuration

Before using the API, make sure you have obtained an API Key. If you don't have one yet, see Creating an API Key.

Basic Information

  • API Base URL: https://api.agentsflare.com/v1
  • Authentication: Bearer Token
  • Content-Type: application/json
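
To make these settings concrete, here is a minimal raw-HTTP sketch using Python's requests library (requests is an assumption here; any HTTP client works, and the SDK examples below set these headers for you):

python
import requests

API_KEY = "YOUR_API_KEY"
BASE_URL = "https://api.agentsflare.com/v1"

resp = requests.post(
    f"{BASE_URL}/chat/completions",
    headers={
        "Authorization": f"Bearer {API_KEY}",  # Bearer Token authentication
        "Content-Type": "application/json",    # JSON request body
    },
    json={
        "model": "gemini-2.5-flash",
        "messages": [{"role": "user", "content": "Hello, how are you?"}],
    },
)
print(resp.json())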

Request Examples

bash
curl -X POST "https://api.agentsflare.com/v1/chat/completions" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gemini-2.5-flash",
    "messages": [
      {
        "role": "user",
        "content": "Hello, how are you?"
      }
    ],
    "max_tokens": 100,
    "temperature": 0.7
  }'
python
from openai import OpenAI
url = "https://api.agentsflare.com/v1"

client = OpenAI(
    base_url=url,
    api_key="YOUR_API_KEY"
)

completion = client.chat.completions.create(
  model="gemini-2.5-flash",
  messages=[
    {"role": "developer", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"}
  ]
)

print(completion.choices[0].message)
javascript (ESM)

import OpenAI from "openai";

const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY, 
  baseURL: "https://api.agentsflare.com/v1"    
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });

    // you can print just the text
    console.log(res.choices?.[0]?.message?.content);
    // or print the full response
    // console.log(res);
  } catch (err) {
    // the error object from the openai SDK usually carries a more detailed response
    console.error(err?.response?.data ?? err);
  }
}

main();
java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletionCreateParams;
import com.openai.models.chat.completions.ChatCompletion;

public class Main {
  public static void main(String[] args) {
    String apiKey = System.getenv("AGENTSFLARE_API_KEY"); 
    if (apiKey == null || apiKey.isBlank()) {
      throw new IllegalStateException("Missing AGENTSFLARE_API_KEY env var");
    }

    OpenAIClient client = OpenAIOkHttpClient.builder()
        .apiKey(apiKey)
        .baseUrl("https://api.agentsflare.com/v1/")
        .build();

    ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
        .model("gemini-2.5-flash")
        .addUserMessage("Hello, how are you?")
        .maxTokens(100)
        .temperature(0.7)
        .build();

    ChatCompletion res = client.chat().completions().create(params);

    // content() is an Optional<String> in the openai-java SDK
    String content = res.choices().get(0).message().content().orElse("");
    System.out.println(content);
  }
}
go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	openai "github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
)

func main() {
	apiKey := os.Getenv("AGENTSFLARE_API_KEY") // read the key from an environment variable
	if apiKey == "" {
		log.Fatal("missing env AGENTSFLARE_API_KEY")
	}

	client := openai.NewClient(
		option.WithAPIKey(apiKey),
		// key step: point the SDK's base URL at agentsflare
		option.WithBaseURL("https://api.agentsflare.com/v1/"),
	)

	ctx := context.Background()

	resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Model: openai.F("gemini-2.5-flash"),
		Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
			openai.UserMessage("Hello, how are you?"),
		}),
		MaxTokens:   openai.F(int64(100)),
		Temperature: openai.F(0.7),
	})
	if err != nil {
		log.Fatalf("chat completion failed: %v", err)
	}

	// print the reply text
	if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != "" {
		fmt.Println(resp.Choices[0].Message.Content)
	} else {
		fmt.Printf("empty response: %+v\n", resp)
	}
}
javascript (CommonJS)
const { OpenAI } = require("openai");

const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY, 
  baseURL: "https://api.agentsflare.com/v1"    
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });

    // you can print just the text
    console.log(res.choices?.[0]?.message?.content);
    // or print the full response
    // console.log(res);
  } catch (err) {
    // the error object from the openai SDK usually carries a more detailed response
    console.error(err?.response?.data ?? err);
  }
}

main();

Response Example


json
{
    "id": "portkey-183db1aa-1e64-4478-b4f0-5ac88479373d",
    "object": "chat.completion",
    "created": 1768129653,
    "model": "gemini-2.5-flash",
    "provider": "google",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": "Hello! I'"
            },
            "index": 0,
            "finish_reason": "length"
        }
    ],
    "usage": {
        "prompt_tokens": 7,
        "completion_tokens": 4,
        "total_tokens": 103,
        "completion_tokens_details": {
            "reasoning_tokens": 92
        }
    }
}
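
Note that finish_reason here is "length": most of the 100-token max_tokens budget was consumed by hidden reasoning tokens (92), leaving only 4 tokens of visible content, which is why the text is cut off. A minimal sketch of how you might detect this with the Python SDK (reusing the client from the Python example above; field names follow the response shown):

python
completion = client.chat.completions.create(
    model="gemini-2.5-flash",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    max_tokens=100,
)

choice = completion.choices[0]
if choice.finish_reason == "length":
    # with reasoning models, hidden reasoning tokens also count against max_tokens
    usage = completion.usage
    details = usage.completion_tokens_details if usage else None
    print("Output truncated; reasoning tokens used:",
          details.reasoning_tokens if details else "unknown")
print(choice.message.content)

Raising max_tokens (or omitting it) leaves room for both the reasoning and the visible reply.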

Request Parameters

For the full list of request parameters, see the Gemini API Quickstart.
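
The examples above pass the standard OpenAI chat-completions parameters max_tokens and temperature; other standard parameters such as top_p and stream follow the same pattern. For example, a streaming request (reusing the client from the Python example above; confirm streaming support for your model against the quickstart):

python
stream = client.chat.completions.create(
    model="gemini-2.5-flash",
    messages=[{"role": "user", "content": "Tell me a short story."}],
    stream=True,  # ask for an incremental, chunked response
)

for chunk in stream:
    # each chunk carries a delta; content may be None on the final chunk
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()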

This document is licensed under CC BY-SA 4.0.