The OpenAI adapter provides access to OpenAI's GPT models, including GPT-4o, GPT-4, and GPT-3.5.
npm install @tanstack/ai-openai
import { chat } from "@tanstack/ai";
import { openai } from "@tanstack/ai-openai";

const adapter = openai({
  apiKey: process.env.OPENAI_API_KEY!,
});

const stream = chat({
  adapter,
  messages: [{ role: "user", content: "Hello!" }],
  model: "gpt-4o",
});
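The messages array can carry a whole conversation, not just a single turn. A minimal sketch of a multi-turn call, assuming system and assistant roles are accepted in the same shape as OpenAI's chat format:

// Sketch: passing a system prompt plus prior turns (role support assumed).
const conversation = chat({
  adapter,
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "What is TanStack?" },
    { role: "assistant", content: "A collection of TypeScript libraries." },
    { role: "user", content: "Which one handles AI chat?" },
  ],
  model: "gpt-4o",
});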
import { openai, type OpenAIConfig } from "@tanstack/ai-openai";

const config: OpenAIConfig = {
  apiKey: process.env.OPENAI_API_KEY!,
  organization: "org-...", // Optional
  baseURL: "https://api.openai.com/v1", // Optional, for custom endpoints
};

const adapter = openai(config);
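The baseURL option is what you would reach for when requests should go through an OpenAI-compatible endpoint such as a proxy or gateway. A sketch, with a placeholder URL rather than a real endpoint:

// Sketch: routing requests through an OpenAI-compatible proxy (placeholder URL).
const proxiedAdapter = openai({
  apiKey: process.env.OPENAI_API_KEY!,
  baseURL: "http://localhost:8787/v1",
});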
import { chat, toStreamResponse } from "@tanstack/ai";
import { openai } from "@tanstack/ai-openai";

const adapter = openai({
  apiKey: process.env.OPENAI_API_KEY!,
});

export async function POST(request: Request) {
  const { messages } = await request.json();

  const stream = chat({
    adapter,
    messages,
    model: "gpt-4o",
  });

  return toStreamResponse(stream);
}
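On the client, the route above can be called with a plain fetch and the response body read as a stream. A sketch, assuming the handler is mounted at /api/chat (adjust the path to your framework's routing); it logs raw decoded chunks rather than assuming a particular wire format:

// Sketch: posting messages to the route and reading the streamed response body.
const response = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "Hello!" }],
  }),
});

const reader = response.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value, { stream: true }));
}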
import { chat, toolDefinition } from "@tanstack/ai";
import { openai } from "@tanstack/ai-openai";
import { z } from "zod";

const adapter = openai({
  apiKey: process.env.OPENAI_API_KEY!,
});

const getWeatherDef = toolDefinition({
  name: "get_weather",
  description: "Get the current weather",
  inputSchema: z.object({
    location: z.string(),
  }),
});

const getWeather = getWeatherDef.server(async ({ location }) => {
  // Fetch weather data
  return { temperature: 72, conditions: "sunny" };
});

const stream = chat({
  adapter,
  messages,
  model: "gpt-4o",
  tools: [getWeather],
});
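The tools array accepts more than one tool. A sketch that defines a second, hypothetical get_time tool with the same toolDefinition/server pattern and registers both:

// Sketch: a second (hypothetical) tool, passed alongside getWeather.
const getTimeDef = toolDefinition({
  name: "get_time",
  description: "Get the current time for a timezone",
  inputSchema: z.object({
    timezone: z.string(),
  }),
});

const getTime = getTimeDef.server(async ({ timezone }) => {
  return { time: new Date().toLocaleString("en-US", { timeZone: timezone }) };
});

const streamWithTools = chat({
  adapter,
  messages,
  model: "gpt-4o",
  tools: [getWeather, getTime],
});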
OpenAI supports various provider-specific options:
const stream = chat({
  adapter: openai({ apiKey: process.env.OPENAI_API_KEY! }),
  messages,
  model: "gpt-4o",
  providerOptions: {
    temperature: 0.7,
    maxTokens: 1000,
    topP: 0.9,
    frequencyPenalty: 0.5,
    presencePenalty: 0.5,
  },
});
Enable reasoning for models that support it (e.g., GPT-5). This allows the model to show its reasoning process, which is streamed as thinking chunks:
providerOptions: {
  reasoning: {
    effort: "medium", // "minimal" | "low" | "medium" | "high"
  },
}
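Putting that fragment into a complete call, reusing the chat and openai APIs shown earlier (the model name here follows the GPT-5 example above; use whichever reasoning-capable model you have access to):

// Sketch: a full chat call with reasoning enabled.
const reasoningStream = chat({
  adapter: openai({ apiKey: process.env.OPENAI_API_KEY! }),
  messages: [{ role: "user", content: "Plan a three-day trip to Kyoto." }],
  model: "gpt-5",
  providerOptions: {
    reasoning: {
      effort: "medium",
    },
  },
});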
Supported Models:
When reasoning is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI.
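If you consume the stream yourself instead of piping it through toStreamResponse, the split might look like the hypothetical sketch below, which assumes the stream is async-iterable and each chunk exposes type and delta fields; the actual chunk shape may differ, so treat this as an illustration only:

// Hypothetical sketch: the chunk shape ("type", "delta") is assumed, not confirmed here.
for await (const chunk of reasoningStream) {
  if (chunk.type === "thinking") {
    console.log("[thinking]", chunk.delta); // would feed the collapsible thinking section
  } else {
    console.log(chunk.delta); // regular response text
  }
}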
Set your API key in environment variables:
OPENAI_API_KEY=sk-...
Creates an OpenAI adapter instance.
Parameters: config (OpenAIConfig), where apiKey is required and organization and baseURL are optional (see the configuration example above).
Returns: An OpenAI adapter instance.
