This guide helps you migrate from the previous version of TanStack AI to the latest version. The major changes focus on improved tree-shaking, clearer API naming, and simplified configuration.
The main breaking changes in this release are:

- Adapters are split into activity-specific functions (for example `openaiText`, `openaiImage`) to improve tree-shaking
- Common options such as `temperature` and `maxTokens` are flattened out of the nested `options` object
- `providerOptions` is renamed to `modelOptions`
- `toResponseStream` is renamed to `toServerSentEventsStream`, and stream helpers now return a raw `ReadableStream`
- Embeddings support is removed
Adapters have been split into activity-specific functions to enable optimal tree-shaking. Instead of importing a monolithic adapter, you now import specific functions for each activity type.
Before:

```ts
import { chat } from '@tanstack/ai'
import { openai } from '@tanstack/ai-openai'

const stream = chat({
  adapter: openai(),
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello!' }],
})
```
After:

```ts
import { chat } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai'

const stream = chat({
  adapter: openaiText('gpt-4o'),
  messages: [{ role: 'user', content: 'Hello!' }],
})
```
Each provider package now exports activity-specific functions:
```ts
import {
  openaiText, // Chat/text generation
  openaiSummarize, // Summarization
  openaiImage, // Image generation
  openaiSpeech, // Text-to-speech
  openaiTranscription, // Audio transcription
  openaiVideo, // Video generation
} from '@tanstack/ai-openai'
```
```ts
import {
  anthropicText, // Chat/text generation
  anthropicSummarize, // Summarization
} from '@tanstack/ai-anthropic'
```
```ts
import {
  geminiText, // Chat/text generation
  geminiSummarize, // Summarization
  geminiImage, // Image generation
  geminiSpeech, // Text-to-speech (experimental)
} from '@tanstack/ai-gemini'
```
```ts
import {
  ollamaText, // Chat/text generation
  ollamaSummarize, // Summarization
} from '@tanstack/ai-ollama'
```
Here's a complete example of migrating adapter usage:
Before:

```ts
import { chat } from '@tanstack/ai'
import { openai } from '@tanstack/ai-openai'
import { anthropic } from '@tanstack/ai-anthropic'

type Provider = 'openai' | 'anthropic'

function getAdapter(provider: Provider) {
  switch (provider) {
    case 'openai':
      return openai()
    case 'anthropic':
      return anthropic()
  }
}

const stream = chat({
  adapter: getAdapter(provider),
  model: provider === 'openai' ? 'gpt-4o' : 'claude-sonnet-4-5',
  messages,
})
```
After:

```ts
import { chat } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai'
import { anthropicText } from '@tanstack/ai-anthropic'

type Provider = 'openai' | 'anthropic'

const adapters = {
  openai: () => openaiText('gpt-4o'),
  anthropic: () => anthropicText('claude-sonnet-4-5'),
}

const stream = chat({
  adapter: adapters[provider](),
  messages,
})
```
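A small optional refinement on the map-based approach: a `satisfies` check makes TypeScript verify that every member of the `Provider` union has an adapter factory, without widening the return type of each entry. This is a generic TypeScript pattern, not something the migration requires:

```ts
import { openaiText } from '@tanstack/ai-openai'
import { anthropicText } from '@tanstack/ai-anthropic'

type Provider = 'openai' | 'anthropic'

// If a Provider member is missing from the map, this fails to compile.
const adapters = {
  openai: () => openaiText('gpt-4o'),
  anthropic: () => anthropicText('claude-sonnet-4-5'),
} satisfies Record<Provider, () => unknown>
```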
Common options that were previously nested in an `options` object are now flattened directly into the config.
Before:

```ts
const stream = chat({
  adapter: openai(),
  model: 'gpt-4o',
  messages,
  options: {
    temperature: 0.7,
    maxTokens: 1000,
    topP: 0.9,
  },
})
```
After:

```ts
const stream = chat({
  adapter: openaiText('gpt-4o'),
  messages,
  temperature: 0.7,
  maxTokens: 1000,
  topP: 0.9,
})
```
These options are now available at the top level, including:

- `temperature`
- `maxTokens`
- `topP`
The `providerOptions` parameter has been renamed to `modelOptions` for clarity. This parameter contains model-specific options that vary by provider and model.
Before:

```ts
const stream = chat({
  adapter: openai(),
  model: 'gpt-4o',
  messages,
  providerOptions: {
    // OpenAI-specific options
    responseFormat: { type: 'json_object' },
    logitBias: { '123': 1.0 },
  },
})
```
After:

```ts
const stream = chat({
  adapter: openaiText('gpt-4o'),
  messages,
  modelOptions: {
    // OpenAI-specific options
    responseFormat: { type: 'json_object' },
    logitBias: { '123': 1.0 },
  },
})
```
`modelOptions` is fully typed based on the adapter and model you're using:
```ts
import { chat } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai'

const adapter = openaiText('gpt-4o')

// TypeScript knows the exact modelOptions type for gpt-4o
const stream = chat({
  adapter,
  messages,
  modelOptions: {
    // Autocomplete and type checking for gpt-4o options
    responseFormat: { type: 'json_object' },
  },
})
```
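The flip side is that options the model doesn't support are rejected at compile time. A hypothetical sketch (the exact option names depend on the adapter's published types; `topK` is used here only as an example of an option OpenAI models don't expose):

```ts
const stream = chat({
  adapter: openaiText('gpt-4o'),
  messages,
  modelOptions: {
    // @ts-expect-error -- assuming `topK` is not declared for gpt-4o,
    // TypeScript flags it instead of silently passing it through
    topK: 40,
  },
})
```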
The `toResponseStream` function has been renamed to `toServerSentEventsStream` to better reflect its purpose. The signature has also changed: the abort controller is now a positional argument, and the helper returns a raw `ReadableStream` that you wrap in a `Response` yourself.
Before:

```ts
import { chat, toResponseStream } from '@tanstack/ai'
import { openai } from '@tanstack/ai-openai'

export async function POST(request: Request) {
  const { messages } = await request.json()
  const abortController = new AbortController()

  const stream = chat({
    adapter: openai(),
    model: 'gpt-4o',
    messages,
    abortController,
  })

  return toResponseStream(stream, { abortController })
}
```
After:

```ts
import { chat, toServerSentEventsStream } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai'

export async function POST(request: Request) {
  const { messages } = await request.json()
  const abortController = new AbortController()

  const stream = chat({
    adapter: openaiText('gpt-4o'),
    messages,
    abortController,
  })

  const readableStream = toServerSentEventsStream(stream, abortController)

  return new Response(readableStream, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      Connection: 'keep-alive',
    },
  })
}
```
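On the client, `EventSource` only supports GET, so a POST endpoint like the one above is read with `fetch` and a stream reader. A minimal sketch, assuming the route is mounted at `/api/chat` (the shape of each `data:` payload depends on TanStack AI's stream protocol):

```ts
// Read the SSE body of the POST endpoint above; `/api/chat` is an assumption.
const response = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello!' }] }),
})

const reader = response.body!.getReader()
const decoder = new TextDecoder()
let buffer = ''

while (true) {
  const { done, value } = await reader.read()
  if (done) break
  buffer += decoder.decode(value, { stream: true })

  // SSE frames are separated by blank lines; payload lines start with "data:"
  const frames = buffer.split('\n\n')
  buffer = frames.pop() ?? ''
  for (const frame of frames) {
    for (const line of frame.split('\n')) {
      if (line.startsWith('data:')) console.log(line.slice(5).trim())
    }
  }
}
```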
If you need the HTTP stream format (newline-delimited JSON) instead of SSE, use `toHttpStream`:
```ts
import { toHttpStream } from '@tanstack/ai'

const readableStream = toHttpStream(stream, abortController)

return new Response(readableStream, {
  headers: {
    'Content-Type': 'application/x-ndjson',
  },
})
```
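Consuming NDJSON on the client is a line-oriented variant of the SSE reader shown earlier (again a sketch; each chunk's shape depends on the stream protocol):

```ts
// Read an NDJSON body: one JSON object per line.
// `response` comes from the same kind of fetch call as in the SSE example.
const reader = response.body!.getReader()
const decoder = new TextDecoder()
let buffer = ''

while (true) {
  const { done, value } = await reader.read()
  if (done) break
  buffer += decoder.decode(value, { stream: true })

  const lines = buffer.split('\n')
  buffer = lines.pop() ?? '' // keep any partial trailing line
  for (const line of lines) {
    if (line.trim()) console.log(JSON.parse(line))
  }
}
```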
Embeddings support has been removed from TanStack AI. Most vector database services (Pinecone, Weaviate, Qdrant, and others) have built-in support for embeddings, and most applications pick an embedding model and stick with it.
Before:

```ts
import { embedding } from '@tanstack/ai'
import { openaiEmbed } from '@tanstack/ai-openai'

const result = await embedding({
  adapter: openaiEmbed(),
  model: 'text-embedding-3-small',
  input: 'Hello, world!',
})
```
Use your vector database service's built-in embedding support, or call the provider's API directly:
```ts
// Example with the OpenAI SDK directly
import OpenAI from 'openai'

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

const result = await openai.embeddings.create({
  model: 'text-embedding-3-small',
  input: 'Hello, world!',
})
```
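If you were using embeddings for in-memory similarity rather than a vector database, the replacement is just the provider's vectors plus a few lines of math. A self-contained helper (not part of any TanStack package):

```ts
// Cosine similarity between two embedding vectors: dot(a, b) / (|a| * |b|)
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0
  let normA = 0
  let normB = 0
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i]
    normA += a[i] * a[i]
    normB += b[i] * b[i]
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB))
}

// With the OpenAI SDK result above, each item exposes `embedding: number[]`:
// const [first, second] = result.data.map((d) => d.embedding)
// cosineSimilarity(first, second)
```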
Here's a complete example showing all the changes together:
Before:

```ts
import { chat, toResponseStream } from '@tanstack/ai'
import { openai } from '@tanstack/ai-openai'

export async function POST(request: Request) {
  const { messages } = await request.json()
  const abortController = new AbortController()

  const stream = chat({
    adapter: openai(),
    model: 'gpt-4o',
    messages,
    options: {
      temperature: 0.7,
      maxTokens: 1000,
    },
    providerOptions: {
      responseFormat: { type: 'json_object' },
    },
    abortController,
  })

  return toResponseStream(stream, { abortController })
}
```
After:

```ts
import { chat, toServerSentEventsStream } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai'

export async function POST(request: Request) {
  const { messages } = await request.json()
  const abortController = new AbortController()

  const stream = chat({
    adapter: openaiText('gpt-4o'),
    messages,
    temperature: 0.7,
    maxTokens: 1000,
    modelOptions: {
      responseFormat: { type: 'json_object' },
    },
    abortController,
  })

  const readableStream = toServerSentEventsStream(stream, abortController)

  return new Response(readableStream, {
    headers: {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      Connection: 'keep-alive',
    },
  })
}
```
If you encounter issues during migration, consult the TanStack AI documentation for the new APIs, or open an issue on the TanStack GitHub repository.
