function generateTranscription<TAdapter, TStream>(options): TranscriptionActivityResult<TStream>

Defined in: packages/typescript/ai/src/activities/generateTranscription/index.ts:146
Transcription activity - converts audio to text.
Uses AI speech-to-text models to transcribe audio content.
Type parameters:
- TAdapter extends TranscriptionAdapter<string, TranscriptionProviderOptions<TAdapter>>
- TStream extends boolean = false

Parameters:
- options: TranscriptionActivityOptions<TAdapter, TStream>

Returns: TranscriptionActivityResult<TStream>
import { generateTranscription } from '@tanstack/ai'
import { openaiTranscription } from '@tanstack/ai-openai'
const result = await generateTranscription({
adapter: openaiTranscription('whisper-1'),
audio: audioFile, // File, Blob, or base64 string
language: 'en'
})
console.log(result.text)

const result = await generateTranscription({
adapter: openaiTranscription('whisper-1'),
audio: audioFile,
responseFormat: 'verbose_json'
})
result.segments?.forEach(segment => {
console.log(`[${segment.start}s - ${segment.end}s]: ${segment.text}`)
})

for await (const chunk of generateTranscription({
adapter: openaiTranscription('whisper-1'),
audio: audioFile,
stream: true
})) {
console.log(chunk)
}