};
static const std::map<e_model, size_t> MEM_REQ_DECODE = {
- { MODEL_TINY,    94ull*MB },
- { MODEL_BASE,    96ull*MB },
- { MODEL_SMALL,   98ull*MB },
- { MODEL_MEDIUM, 100ull*MB },
- { MODEL_LARGE,  102ull*MB },
+ { MODEL_TINY,   200ull*MB },
+ { MODEL_BASE,   202ull*MB },
+ { MODEL_SMALL,  204ull*MB },
+ { MODEL_MEDIUM, 206ull*MB },
+ { MODEL_LARGE,  208ull*MB },
};
static const std::map<e_model, size_t> MEM_REQ_DECODE_LAYER = {
struct whisper_full_params params,
const float * samples,
int n_samples) {
+ // clear old results
+ auto & result_all = ctx->result_all;
+ auto & result_cur = ctx->result_cur;
+
+ result_all.clear();
+
// compute log mel spectrogram
if (whisper_pcm_to_mel(ctx, samples, n_samples, params.n_threads) != 0) {
fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__);
}
}
- auto & result_all = ctx->result_all;
- auto & result_cur = ctx->result_cur;
-
- result_all.clear();
-
int progress_prev = 0;
int progress_step = 5;
whisper_token id = 0;
whisper_token tid = whisper_token_beg(ctx);
- id = whisper_sample_best(ctx, result_len == 0);
+ id = whisper_sample_best(ctx, result_len == 0 || i > 32);
if (i > 0) {
tid = whisper_sample_timestamp(ctx);
}
// end of text token
if (id == whisper_token_eot(ctx)) {
if (result_len == 0) {
- result_len = i + 1;
+ // TODO: figure out how to resolve this
+ fprintf(stderr, "\n%s: failed to generate timestamp token - this should not happen\n\n", __func__);
+ //result_len = i + 1;
}
break;
}