p.max_tokens = C.int(n)
}
+// SetAudioCtx overrides the audio encoder context size (0 = use the model default)
+func (p *Params) SetAudioCtx(n int) {
+ p.audio_ctx = C.int(n)
+}
+
///////////////////////////////////////////////////////////////////////////////
// PRIVATE METHODS
str += fmt.Sprintf(" n_max_text_ctx=%d", p.n_max_text_ctx)
str += fmt.Sprintf(" offset_ms=%d", p.offset_ms)
str += fmt.Sprintf(" duration_ms=%d", p.duration_ms)
+ str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx)
if p.translate {
str += " translate"
}
}
func (context *context) SetSplitOnWord(v bool) {
- context.params.SetSplitOnWord(v)
+ context.params.SetSplitOnWord(v)
}
// Set number of threads to use
context.params.SetMaxTokensPerSegment(int(n))
}
+// SetAudioCtx sets the audio encoder context size (0 = use the model default)
+func (context *context) SetAudioCtx(n uint) {
+ context.params.SetAudioCtx(int(n))
+}
+
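For context, a minimal usage sketch of the new setter through the public Context interface. This is illustrative only: the model path is a placeholder, whisper.New and Model.NewContext come from the existing bindings, and 512 is an arbitrary reduced context size (in whisper.cpp, audio_ctx overrides the encoder context; 0 keeps the default, smaller values trade accuracy for speed).

package main

import (
	"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
)

func main() {
	// Load a model (placeholder path) and create a decoding context.
	model, err := whisper.New("models/ggml-base.en.bin")
	if err != nil {
		panic(err)
	}
	defer model.Close()

	context, err := model.NewContext()
	if err != nil {
		panic(err)
	}

	// Reduce the audio encoder context; 0 would keep the model default.
	context.SetAudioCtx(512)
}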
// ResetTimings resets the model timings. Should be called before processing
func (context *context) ResetTimings() {
context.model.ctx.Whisper_reset_timings()
SetMaxSegmentLength(uint) // Set max segment length in characters
SetTokenTimestamps(bool) // Set token timestamps flag
SetMaxTokensPerSegment(uint) // Set max tokens per segment (0 = no limit)
+ SetAudioCtx(uint) // Set audio encoder context size (0 = use default)
// Process mono audio data and return any errors.
// If defined, newly generated segments are passed to the