The Ollama adapter provides access to local models running via Ollama, allowing you to run AI models on your own infrastructure.
npm install @tanstack/ai-ollama
import { chat } from "@tanstack/ai";
import { ollama } from "@tanstack/ai-ollama";
const adapter = ollama({
baseURL: "http://localhost:11434", // Default Ollama URL
});
const stream = chat({
adapter,
messages: [{ role: "user", content: "Hello!" }],
model: "llama3",
});
import { ollama, type OllamaConfig } from "@tanstack/ai-ollama";
const config: OllamaConfig = {
baseURL: "http://localhost:11434", // Ollama server URL
// No API key needed for local Ollama
};
const adapter = ollama(config);
The models you can use depend on what you have pulled into your local Ollama installation. Common choices include llama3, mistral, gemma, and codellama.
To see available models, run:
ollama list
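You can also query the Ollama server directly: it exposes a REST endpoint that lists locally installed models. A minimal sketch, assuming the default port and Ollama's /api/tags endpoint:
// List locally installed models via Ollama's REST API
const res = await fetch("http://localhost:11434/api/tags");
const { models } = await res.json();
console.log(models.map((m: { name: string }) => m.name));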
import { chat, toStreamResponse } from "@tanstack/ai";
import { ollama } from "@tanstack/ai-ollama";
const adapter = ollama({
baseURL: "http://localhost:11434",
});
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter,
messages,
model: "llama3", // Use a model you have installed
});
return toStreamResponse(stream);
}
import { chat, toolDefinition } from "@tanstack/ai";
import { ollama } from "@tanstack/ai-ollama";
import { z } from "zod";
const adapter = ollama({
baseURL: "http://localhost:11434",
});
const getLocalDataDef = toolDefinition({
name: "get_local_data",
description: "Get data from local storage",
inputSchema: z.object({
key: z.string(),
}),
});
const getLocalData = getLocalDataDef.server(async ({ key }) => {
// Access local data
return { data: "..." };
});
const stream = chat({
adapter,
messages,
model: "llama3",
tools: [getLocalData],
});
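Tools plug into a server route the same way as the basic chat example above. A sketch that combines the two, reusing the adapter and getLocalData from the example above and assuming the same POST handler shape shown earlier:
import { chat, toStreamResponse } from "@tanstack/ai";
export async function POST(request: Request) {
  const { messages } = await request.json();
  // Stream a tool-enabled chat back to the client
  const stream = chat({
    adapter,
    messages,
    model: "llama3",
    tools: [getLocalData],
  });
  return toStreamResponse(stream);
}
Note that tool calling generally requires a model that supports it; check the model's documentation in the Ollama library if tool calls are not being emitted.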
Install Ollama:
# macOS
brew install ollama
# Linux
curl -fsSL https://ollama.com/install.sh | sh
# Windows
# Download from https://ollama.com
Pull a model:
ollama pull llama3
Start Ollama server:
ollama serve
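Before wiring the adapter into your app, you can confirm the server is reachable. A minimal sketch, assuming the default port:
// Quick health check against the local Ollama server
const res = await fetch("http://localhost:11434");
console.log(res.ok ? "Ollama is reachable" : `Unexpected status: ${res.status}`);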
Ollama supports various provider-specific options:
const stream = chat({
adapter: ollama({ baseURL: "http://localhost:11434" }),
messages,
model: "llama3",
providerOptions: {
temperature: 0.7,
numPredict: 1000,
topP: 0.9,
topK: 40,
},
});
If you're running Ollama on a different host or port:
const adapter = ollama({
baseURL: "http://your-server:11434",
});
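One common pattern is to read the server URL from an environment variable so the same code works locally and against a remote host. A sketch; OLLAMA_BASE_URL is a name chosen here for illustration, not something the adapter reads automatically:
import { ollama } from "@tanstack/ai-ollama";
// Fall back to the local default when no override is set
const adapter = ollama({
  baseURL: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
});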
Creates an Ollama adapter instance.
Parameters: config (OllamaConfig). baseURL is the URL of the Ollama server, typically http://localhost:11434. No API key is needed for a local Ollama instance.
Returns: An Ollama adapter instance.
