float logprob_thold = -1.00f;
float temperature = 0.00f;
float temperature_inc = 0.20f;
+ float no_speech_thold = 0.6f;
bool debug_mode = false;
bool translate = false;
fprintf(stderr, " --inference-path PATH, [%-7s] Inference path for all requests\n", sparams.inference_path.c_str());
fprintf(stderr, " --convert, [%-7s] Convert audio to WAV, requires ffmpeg on the server", sparams.ffmpeg_converter ? "true" : "false");
fprintf(stderr, " -sns, --suppress-nst [%-7s] suppress non-speech tokens\n", params.suppress_nst ? "true" : "false");
+ fprintf(stderr, " -nth N, --no-speech-thold N [%-7.2f] no speech threshold\n", params.no_speech_thold);
fprintf(stderr, "\n");
}
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
else if (arg == "-sns" || arg == "--suppress-nst") { params.suppress_nst = true; }
+ else if (arg == "-nth" || arg == "--no-speech-thold") { params.no_speech_thold = std::stof(argv[++i]); }
+
// server params
else if ( arg == "--port") { sparams.port = std::stoi(argv[++i]); }
else if ( arg == "--host") { sparams.hostname = argv[++i]; }
wparams.beam_search.beam_size = params.beam_size;
wparams.temperature = params.temperature;
+ wparams.no_speech_thold = params.no_speech_thold;
wparams.temperature_inc = params.temperature_inc;
wparams.entropy_thold = params.entropy_thold;
wparams.logprob_thold = params.logprob_thold;
-                // TODO compression_ratio and no_speech_prob are not implemented yet
+                // TODO compression_ratio is not implemented yet
// segment["compression_ratio"] = 0;
- // segment["no_speech_prob"] = 0;
+ segment["no_speech_prob"] = whisper_full_get_segment_no_speech_prob(ctx, i);
jres["segments"].push_back(segment);
}
int64_t t1;
std::string text;
+ float no_speech_prob;
std::vector<whisper_token_data> tokens;
//printf("tt0 = %d, tt1 = %d, text = %s, token = %s, token_id = %d, tid = %d\n", tt0, tt1, text.c_str(), ctx->vocab.id_to_token[tokens_cur[i].id].c_str(), tokens_cur[i].id, tokens_cur[i].tid);
- result_all.push_back({ tt0, tt1, text, {}, speaker_turn_next });
+ result_all.push_back({ tt0, tt1, text, state->no_speech_prob, {}, speaker_turn_next });
for (int j = i0; j <= i; j++) {
result_all.back().tokens.push_back(tokens_cur[j]);
}
}
}
- result_all.push_back({ tt0, tt1, text, {} , speaker_turn_next });
+ result_all.push_back({ tt0, tt1, text, state->no_speech_prob, {}, speaker_turn_next });
for (int j = i0; j < (int) tokens_cur.size(); j++) {
result_all.back().tokens.push_back(tokens_cur[j]);
}
return ctx->state->result_all[i_segment].tokens[i_token].p;
}
+// Get the no-speech probability of the specified segment, as computed during
+// whisper_full() and stored on the segment (see result_all push sites above).
+// NOTE(review): i_segment is not bounds-checked — this matches the convention
+// of the neighboring segment/token getters (e.g. whisper_full_get_token_p);
+// callers are expected to stay within [0, whisper_full_n_segments()).
+float whisper_full_get_segment_no_speech_prob(struct whisper_context * ctx, int i_segment) {
+    return ctx->state->result_all[i_segment].no_speech_prob;
+}
+
// =================================================================================================
//