The OpenAI adapter provides access to OpenAI's models and capabilities: chat models such as GPT-4o and GPT-5, image generation (DALL-E), text-to-speech (TTS), and audio transcription (Whisper).
npm install @tanstack/ai-openai
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
const stream = chat({
adapter: openaiText("gpt-4o"),
messages: [{ role: "user", content: "Hello!" }],
});
import { chat } from "@tanstack/ai";
import { createOpenaiChat } from "@tanstack/ai-openai";
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, {
// ... your config options
});
const stream = chat({
adapter: adapter("gpt-4o"),
messages: [{ role: "user", content: "Hello!" }],
});
import { createOpenaiChat, type OpenAIChatConfig } from "@tanstack/ai-openai";
const config: Omit<OpenAIChatConfig, 'apiKey'> = {
organization: "org-...", // Optional
baseURL: "https://api.openai.com/v1", // Optional, for custom endpoints
};
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, config);
import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter: openaiText("gpt-4o"),
messages,
});
return toServerSentEventsResponse(stream);
}
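The handler returns a standard text/event-stream Response, so any SSE-capable client can consume it. A rough browser-side sketch, assuming the route above is mounted at /api/chat and without relying on any client helper the library may provide:
const response = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ messages: [{ role: "user", content: "Hello!" }] }),
});
const reader = response.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Each chunk contains one or more "data: ..." SSE frames; how you parse the
  // payload depends on the event shape emitted by the stream.
  console.log(decoder.decode(value, { stream: true }));
}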
import { chat, toolDefinition } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { z } from "zod";
const getWeatherDef = toolDefinition({
name: "get_weather",
description: "Get the current weather",
inputSchema: z.object({
location: z.string(),
}),
});
const getWeather = getWeatherDef.server(async ({ location }) => {
// Fetch weather data
return { temperature: 72, conditions: "sunny" };
});
const stream = chat({
adapter: openaiText("gpt-4o"),
messages,
tools: [getWeather],
});
OpenAI supports various provider-specific options:
const stream = chat({
adapter: openaiText("gpt-4o"),
messages,
modelOptions: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
frequency_penalty: 0.5,
presence_penalty: 0.5,
stop: ["END"],
},
});
Enable reasoning for models that support it (e.g., GPT-5, o3). This allows the model to show its reasoning process, which is streamed as thinking chunks:
modelOptions: {
reasoning: {
effort: "medium", // "none" | "minimal" | "low" | "medium" | "high"
summary: "detailed", // "auto" | "detailed" (optional)
},
}
When reasoning is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI.
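For reference, a complete request with reasoning enabled looks like the following; the "gpt-5" model ID is only an example, substitute whichever reasoning-capable model you are targeting:
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
const stream = chat({
  adapter: openaiText("gpt-5"),
  messages: [{ role: "user", content: "Walk me through your plan." }],
  modelOptions: {
    reasoning: {
      effort: "medium",
      summary: "auto",
    },
  },
});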
Summarize long text content:
import { summarize } from "@tanstack/ai";
import { openaiSummarize } from "@tanstack/ai-openai";
const result = await summarize({
adapter: openaiSummarize("gpt-4o-mini"),
text: "Your long text to summarize...",
maxLength: 100,
style: "concise", // "concise" | "bullet-points" | "paragraph"
});
console.log(result.summary);
Generate images with OpenAI's image models, such as gpt-image-1 (or DALL-E):
import { generateImage } from "@tanstack/ai";
import { openaiImage } from "@tanstack/ai-openai";
const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "A futuristic cityscape at sunset",
numberOfImages: 1,
size: "1024x1024",
});
console.log(result.images);
const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "...",
modelOptions: {
quality: "hd", // "standard" | "hd"
style: "natural", // "natural" | "vivid"
},
});
Generate speech from text:
import { generateSpeech } from "@tanstack/ai";
import { openaiTTS } from "@tanstack/ai-openai";
const result = await generateSpeech({
adapter: openaiTTS("tts-1"),
text: "Hello, welcome to TanStack AI!",
voice: "alloy",
format: "mp3",
});
// result.audio contains base64-encoded audio
console.log(result.format); // "mp3"
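Since result.audio is a base64 string, saving the audio on the server is a simple decode-and-write. A minimal Node sketch (the output filename is arbitrary):
import { writeFile } from "node:fs/promises";
// Decode the base64 payload and write it to disk using the reported format.
await writeFile(`speech.${result.format}`, Buffer.from(result.audio, "base64"));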
Available voices: alloy, echo, fable, onyx, nova, shimmer, ash, ballad, coral, sage, verse
const result = await generateSpeech({
adapter: openaiTTS("tts-1-hd"),
text: "High quality speech",
modelOptions: {
speed: 1.0, // 0.25 to 4.0
},
});
Transcribe audio to text:
import { generateTranscription } from "@tanstack/ai";
import { openaiTranscription } from "@tanstack/ai-openai";
const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile, // File object or base64 string
language: "en",
});
console.log(result.text); // Transcribed text
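In a server route, the File can come straight from multipart form data. A small sketch, assuming the upload field is named "audio" (the field name and response shape are placeholders, not part of the adapter's API):
import { generateTranscription } from "@tanstack/ai";
import { openaiTranscription } from "@tanstack/ai-openai";
export async function POST(request: Request) {
  const formData = await request.formData();
  const audioFile = formData.get("audio") as File;
  const result = await generateTranscription({
    adapter: openaiTranscription("whisper-1"),
    audio: audioFile,
    language: "en",
  });
  return Response.json({ text: result.text });
}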
const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile,
modelOptions: {
response_format: "verbose_json", // Get timestamps
temperature: 0,
prompt: "Technical terms: API, SDK",
},
});
// Access segments with timestamps
console.log(result.segments);
Set your API key in environment variables:
OPENAI_API_KEY=sk-...
openaiText: Creates an OpenAI chat adapter using the API key from environment variables.
Returns: An OpenAI chat adapter instance.
createOpenaiChat: Creates an OpenAI chat adapter with an explicit API key.
Parameters: the API key string, plus an optional configuration object (organization, baseURL).
Returns: An OpenAI chat adapter instance.
openaiSummarize: Creates an OpenAI summarization adapter using the API key from environment variables.
Returns: An OpenAI summarize adapter instance.
Creates an OpenAI summarization adapter with an explicit API key.
Returns: An OpenAI summarize adapter instance.
openaiImage: Creates an OpenAI image generation adapter using the API key from environment variables.
Returns: An OpenAI image adapter instance.
Creates an OpenAI image generation adapter with an explicit API key.
Returns: An OpenAI image adapter instance.
openaiTTS: Creates an OpenAI TTS adapter using the API key from environment variables.
Returns: An OpenAI TTS adapter instance.
Creates an OpenAI TTS adapter with an explicit API key.
Returns: An OpenAI TTS adapter instance.
openaiTranscription: Creates an OpenAI transcription adapter using the API key from environment variables.
Returns: An OpenAI transcription adapter instance.
Creates an OpenAI transcription adapter with an explicit API key.
Returns: An OpenAI transcription adapter instance.
