The Anthropic adapter provides access to Claude models, including Claude 3.5 Sonnet, Claude 3 Opus, and more.
npm install @tanstack/ai-anthropic
npm install @tanstack/ai-anthropic
import { chat } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";

// Create an Anthropic adapter; the API key is read from the environment.
const adapter = anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY!,
});

// Start a streaming chat completion against a Claude model.
const stream = chat({
  adapter,
  messages: [{ role: "user", content: "Hello!" }],
  model: "claude-3-5-sonnet-20241022",
});
import { chat } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";

// Create an Anthropic adapter; the API key is read from the environment.
const adapter = anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY!,
});

// Start a streaming chat completion against a Claude model.
const stream = chat({
  adapter,
  messages: [{ role: "user", content: "Hello!" }],
  model: "claude-3-5-sonnet-20241022",
});
import { anthropic, type AnthropicConfig } from "@tanstack/ai-anthropic";

// Typed adapter configuration; AnthropicConfig documents the accepted fields.
const config: AnthropicConfig = {
  apiKey: process.env.ANTHROPIC_API_KEY!,
};

const adapter = anthropic(config);
import { anthropic, type AnthropicConfig } from "@tanstack/ai-anthropic";

// Typed adapter configuration; AnthropicConfig documents the accepted fields.
const config: AnthropicConfig = {
  apiKey: process.env.ANTHROPIC_API_KEY!,
};

const adapter = anthropic(config);
import { chat, toStreamResponse } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";

// Module-level adapter: created once and shared by every request.
const adapter = anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY!,
});

// HTTP route handler: reads the chat history from the request body and
// streams Claude's reply back to the client.
export async function POST(request: Request) {
  const { messages } = await request.json();
  const stream = chat({
    adapter,
    messages,
    model: "claude-3-5-sonnet-20241022",
  });
  // Wrap the chat stream in a streaming HTTP Response.
  return toStreamResponse(stream);
}
import { chat, toStreamResponse } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";

// Module-level adapter: created once and shared by every request.
const adapter = anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY!,
});

// HTTP route handler: reads the chat history from the request body and
// streams Claude's reply back to the client.
export async function POST(request: Request) {
  const { messages } = await request.json();
  const stream = chat({
    adapter,
    messages,
    model: "claude-3-5-sonnet-20241022",
  });
  // Wrap the chat stream in a streaming HTTP Response.
  return toStreamResponse(stream);
}
import { chat, toolDefinition } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";
import { z } from "zod";
const adapter = anthropic({
apiKey: process.env.ANTHROPIC_API_KEY!,
});
const searchDatabaseDef = toolDefinition({
name: "search_database",
description: "Search the database",
inputSchema: z.object({
query: z.string(),
}),
});
const searchDatabase = searchDatabaseDef.server(async ({ query }) => {
// Search database
return { results: [...] };
});
const stream = chat({
adapter,
messages,
model: "claude-3-5-sonnet-20241022",
tools: [searchDatabase],
});
import { chat, toolDefinition } from "@tanstack/ai";
import { anthropic } from "@tanstack/ai-anthropic";
import { z } from "zod";
const adapter = anthropic({
apiKey: process.env.ANTHROPIC_API_KEY!,
});
const searchDatabaseDef = toolDefinition({
name: "search_database",
description: "Search the database",
inputSchema: z.object({
query: z.string(),
}),
});
const searchDatabase = searchDatabaseDef.server(async ({ query }) => {
// Search database
return { results: [...] };
});
const stream = chat({
adapter,
messages,
model: "claude-3-5-sonnet-20241022",
tools: [searchDatabase],
});
Anthropic supports provider-specific options:
const stream = chat({
  adapter: anthropic({ apiKey: process.env.ANTHROPIC_API_KEY! }),
  messages,
  model: "claude-3-5-sonnet-20241022",
  // Anthropic-specific options, passed through to the provider.
  providerOptions: {
    // Extended thinking: Claude streams its reasoning, capped by a token budget.
    thinking: {
      type: "enabled",
      budgetTokens: 1000,
    },
    // Prompt caching: "ephemeral" cache entries with a 5-minute TTL.
    cacheControl: {
      type: "ephemeral",
      ttl: "5m",
    },
    // Presumably forwards reasoning chunks in the response stream — verify
    // against the adapter's option reference.
    sendReasoning: true,
  },
});
const stream = chat({
  adapter: anthropic({ apiKey: process.env.ANTHROPIC_API_KEY! }),
  messages,
  model: "claude-3-5-sonnet-20241022",
  // Anthropic-specific options, passed through to the provider.
  providerOptions: {
    // Extended thinking: Claude streams its reasoning, capped by a token budget.
    thinking: {
      type: "enabled",
      budgetTokens: 1000,
    },
    // Prompt caching: "ephemeral" cache entries with a 5-minute TTL.
    cacheControl: {
      type: "ephemeral",
      ttl: "5m",
    },
    // Presumably forwards reasoning chunks in the response stream — verify
    // against the adapter's option reference.
    sendReasoning: true,
  },
});
Enable extended thinking with a token budget. This allows Claude to show its reasoning process, which is streamed as thinking chunks and displayed as ThinkingPart in messages:
providerOptions: {
  thinking: {
    type: "enabled",
    budgetTokens: 2048, // Maximum tokens for thinking
  },
}
providerOptions: {
  thinking: {
    type: "enabled",
    budgetTokens: 2048, // Maximum tokens for thinking
  },
}
Note: the request's max_tokens must be greater than the thinking budget (budgetTokens). The adapter automatically adjusts max_tokens if needed.
Supported Models:
When thinking is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI.
Cache prompts for better performance:
providerOptions: {
cacheControl: {
type: "ephemeral",
ttl: "5m", // Cache TTL: '5m' or '1h'
},
}
providerOptions: {
cacheControl: {
type: "ephemeral",
ttl: "5m", // Cache TTL: '5m' or '1h'
},
}
Set your API key in environment variables:
ANTHROPIC_API_KEY=sk-ant-...
ANTHROPIC_API_KEY=sk-ant-...
Creates an Anthropic adapter instance.
Parameters:
Returns: An Anthropic adapter instance.
