// shared between all decoders
whisper_kv_cache kv_cross;
+ // padded buffer for flash-attention
+ whisper_kv_cache kv_pad;
+
whisper_mel mel;
whisper_batch batch;
whisper_decoder decoders[WHISPER_MAX_DECODERS];
- ggml_backend_t backend = nullptr;
-
// ggml-alloc:
// - stores meta info about the intermediate tensors into the `meta` buffers
// - stores the actual tensor data into the `data` buffers
}
static bool kv_cache_init(
- const struct whisper_hparams & hparams,
struct whisper_kv_cache & cache,
ggml_backend_t backend,
ggml_type wtype,
+ int64_t n_text_state,
+ int64_t n_text_layer,
int n_ctx) {
- const int64_t n_text_state = hparams.n_text_state;
- const int64_t n_text_layer = hparams.n_text_layer;
-
const int64_t n_mem = n_text_layer*n_ctx;
const int64_t n_elements = n_text_state*n_mem;
return false;
}
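+    // zero-initialize the cache buffer so cells that are never written (e.g. the padded region) read as zeros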
+ ggml_backend_buffer_clear(cache.buffer, 0);
+
return true;
}
}
}
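+// padding (in cells) applied to the KV cache length when flash attention is enabled on the
+// active backend; 1 (i.e. no padding) otherwise
+// e.g. with the CUDA backend and 100 occupied cells, kv_self.n is rounded up to GGML_PAD(100, 256) == 256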
+static uint32_t whisper_kv_cache_get_padding(const struct whisper_context & wctx) {
+ if (!wctx.params.flash_attn) {
+ return 1u;
+ }
+
+#ifdef GGML_USE_METAL
+ if (ggml_backend_is_metal(wctx.backend)) {
+ return 32u;
+ }
+#endif
+
+#ifdef GGML_USE_CUDA
+ if (ggml_backend_is_cuda(wctx.backend)) {
+ return 256u;
+ }
+#endif
+
+ return 1u;
+}
+
// [EXPERIMENTAL] Token-level timestamps with DTW
static bool aheads_masks_init(
const whisper_context_params & cparams,
const int n_head = hparams.n_audio_head;
const int n_layer = hparams.n_audio_layer;
+ const int n_state_head = n_state/n_head;
+
+ auto & kv_pad = wstate.kv_pad;
+
+ WHISPER_ASSERT(!!kv_pad.ctx);
+
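+    // pad the audio context to a multiple of 256 so the K/V views below line up with the kv_pad
+    // buffer, which is allocated with GGML_PAD(n_audio_ctx, 256) when the state is created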
+ const int n_ctx_pad = GGML_PAD(n_ctx, 256);
+
struct ggml_init_params params = {
/*.mem_size =*/ wstate.alloc_encode.meta.size(),
/*.mem_buffer =*/ wstate.alloc_encode.meta.data(),
struct ggml_tensor * cur = ggml_view_tensor(ctx0, wstate.embd_conv);
- const float KQscale = 1.0f/sqrtf(float(n_state)/n_head);
+ const float KQscale = 1.0f/sqrtf(float(n_state_head));
// ===================================================================
// NOTE: experimenting with partial evaluation of the encoder (ignore)
Qcur = ggml_add(ctx0, Qcur, layer.attn_q_b);
- //Qcur = ggml_scale(ctx0, Qcur, pow(float(n_state)/n_head, -0.25));
+ //Qcur = ggml_scale(ctx0, Qcur, pow(float(n_state_head), -0.25));
// note: no bias for Key
struct ggml_tensor * Kcur = ggml_mul_mat(ctx0,
layer.attn_k_w,
cur);
- //Kcur = ggml_scale(ctx0, Kcur, pow(float(n_state)/n_head, -0.25));
+ //Kcur = ggml_scale(ctx0, Kcur, pow(float(n_state_head), -0.25));
struct ggml_tensor * Vcur = ggml_mul_mat(ctx0,
layer.attn_v_w,
ggml_permute(ctx0,
ggml_cpy(ctx0,
Qcur,
- ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_state/n_head, n_head, n_ctx)),
+ ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_state_head, n_head, n_ctx)),
0, 2, 1, 3);
- struct ggml_tensor * K =
- ggml_permute(ctx0,
- ggml_cpy(ctx0,
- Kcur,
- ggml_new_tensor_3d(ctx0, wctx.itype, n_state/n_head, n_head, n_ctx)),
- 0, 2, 1, 3);
-
- // K * Q
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ if (wctx.params.flash_attn) {
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, ggml_view_1d(ctx0, kv_pad.k, n_ctx*n_state, 0)));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, ggml_view_1d(ctx0, kv_pad.v, n_ctx*n_state, 0)));
- struct ggml_tensor * KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, nullptr, KQscale, 0.0f);
+ struct ggml_tensor * K =
+ ggml_view_3d(ctx0, kv_pad.k,
+ n_state_head, n_ctx_pad, n_head,
+ ggml_element_size(kv_pad.k)*n_state,
+ ggml_element_size(kv_pad.k)*n_state_head,
+ 0);
- struct ggml_tensor * V =
- ggml_cpy(ctx0,
- ggml_permute(ctx0,
- ggml_reshape_3d(ctx0,
- Vcur,
- n_state/n_head, n_head, n_ctx),
- 1, 2, 0, 3),
- ggml_new_tensor_3d(ctx0, wctx.itype, n_ctx, n_state/n_head, n_head)
- );
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_pad.v,
+ n_state_head, n_ctx_pad, n_head,
+ ggml_element_size(kv_pad.v)*n_state,
+ ggml_element_size(kv_pad.v)*n_state_head,
+ 0);
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
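+                    // the encoder self-attention is unmasked, hence the nullptr mask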
+ cur = ggml_flash_attn_ext(ctx0, Q, K, V, nullptr, KQscale, 0.0f);
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
-
- cur = ggml_cpy(ctx0,
- KQV_merged,
- ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx));
+ cur = ggml_reshape_2d(ctx0, cur, n_state, n_ctx);
+ } else {
+ struct ggml_tensor * K =
+ ggml_permute(ctx0,
+ ggml_cpy(ctx0,
+ Kcur,
+ ggml_new_tensor_3d(ctx0, wctx.itype, n_state_head, n_head, n_ctx)),
+ 0, 2, 1, 3);
+
+ // K * Q
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, nullptr, KQscale, 0.0f);
+
+ struct ggml_tensor * V =
+ ggml_cpy(ctx0,
+ ggml_permute(ctx0,
+ ggml_reshape_3d(ctx0,
+ Vcur,
+ n_state_head, n_head, n_ctx),
+ 1, 2, 0, 3),
+ ggml_new_tensor_3d(ctx0, wctx.itype, n_ctx, n_state_head, n_head)
+ );
+
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+
+ cur = ggml_cpy(ctx0,
+ KQV_merged,
+ ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx));
+ }
}
// projection
const int n_state = hparams.n_audio_state;
const int n_head = hparams.n_audio_head;
+ const int n_state_head = n_state/n_head;
+
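+    // with flash attention each decoder layer occupies n_ctx_pad == GGML_PAD(n_audio_ctx, 256)
+    // slots in kv_cross, matching how the cross-attention cache is allocated when the state is created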
+ const int n_ctx_pad = GGML_PAD(n_ctx, 256);
+
struct ggml_init_params params = {
/*.mem_size =*/ wstate.alloc_cross.meta.size(),
/*.mem_buffer =*/ wstate.alloc_cross.meta.data(),
struct ggml_tensor * cur = ggml_view_tensor(ctx0, wstate.embd_enc);
- const float Kscale = pow(float(n_state) / n_head, -0.25);
+ const float Kscale = pow(float(n_state_head), -0.25);
for (int il = 0; il < model.hparams.n_text_layer; ++il) {
auto & layer = model.layers_decoder[il];
- struct ggml_tensor* Kcross = ggml_mul_mat(ctx0,
+ struct ggml_tensor * Kcross = ggml_mul_mat(ctx0,
layer.cross_attn_k_w,
cur);
Kcross = ggml_scale(ctx0, Kcross, Kscale);
- struct ggml_tensor* Vcross = ggml_mul_mat(ctx0,
+ struct ggml_tensor * Vcross = ggml_mul_mat(ctx0,
layer.cross_attn_v_w,
cur);
Vcross,
layer.cross_attn_v_b);
- Vcross = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcross, n_state, n_ctx));
+ struct ggml_tensor * k;
+ struct ggml_tensor * v;
- struct ggml_tensor * k = ggml_view_1d(ctx0, wstate.kv_cross.k,
- n_state*n_ctx,
- (ggml_element_size(wstate.kv_cross.k)*n_state)*(il*n_ctx));
+ if (wctx.params.flash_attn) {
+ k = ggml_view_1d(ctx0, wstate.kv_cross.k, n_state*n_ctx,
+ (ggml_element_size(wstate.kv_cross.k)*n_state)*(il*n_ctx_pad));
- struct ggml_tensor * v = ggml_view_2d(ctx0, wstate.kv_cross.v, n_ctx, n_state,
- ( n_ctx)*ggml_element_size(wstate.kv_cross.v),
- (il*n_ctx)*ggml_element_size(wstate.kv_cross.v)*n_state);
+ v = ggml_view_1d(ctx0, wstate.kv_cross.v, n_state*n_ctx,
+ (ggml_element_size(wstate.kv_cross.v)*n_state)*(il*n_ctx_pad));
+ } else {
+ Vcross = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcross, n_state, n_ctx));
+
+ k = ggml_view_1d(ctx0, wstate.kv_cross.k, n_state*n_ctx,
+ (ggml_element_size(wstate.kv_cross.k)*n_state)*(il*n_ctx));
+
+ v = ggml_view_2d(ctx0, wstate.kv_cross.v, n_ctx, n_state,
+ ( n_ctx)*ggml_element_size(wstate.kv_cross.v),
+ (il*n_ctx)*ggml_element_size(wstate.kv_cross.v)*n_state);
+ }
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcross, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcross, v));
}
if (!whisper_encode_external(wstate)) {
- if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
+ if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
return false;
}
} else {
return false;
}
- if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
+ if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
return false;
}
}
return false;
}
- if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
+ if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
return false;
}
}
const int n_head = hparams.n_text_head;
const int n_layer = hparams.n_text_layer;
+ const int n_state_head = n_state/n_head;
+
const int n_tokens = batch.n_tokens;
const int n_audio_ctx = wstate.exp_n_audio_ctx > 0 ? wstate.exp_n_audio_ctx : hparams.n_audio_ctx;
- const int32_t n_kv = worst_case ? n_ctx : kv_self.n;
- const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head;
+ const int n_audio_ctx_pad = GGML_PAD(n_audio_ctx, 256);
+
+ const int32_t n_kv = worst_case ? n_ctx : kv_self.n;
+ const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head;
//WHISPER_LOG_DEBUG("%s: n_past = %d, n_tokens = %d, n_audio_ctx = %d, n_ctx = %d\n", __func__, n_past, n_tokens, n_audio_ctx, n_ctx);
ggml_set_name(position, "position");
ggml_set_input(position);
- const float KQscale = pow(float(n_state)/n_head, -0.25);
+ const float KQscale = pow(float(n_state_head), -0.25);
- struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1);
ggml_set_name(KQ_mask, "KQ_mask");
ggml_set_input(KQ_mask);
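+    // cast to F16 for the flash-attention path below; the ggml_soft_max_ext fallback keeps using the F32 KQ_mask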
+ struct ggml_tensor * KQ_mask_f16 = ggml_cast(ctx0, KQ_mask, GGML_TYPE_F16);
+
// token encoding + position encoding
struct ggml_tensor * cur =
ggml_add(ctx0,
Vcur,
layer.attn_v_b);
- Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_state, n_tokens));
+ struct ggml_tensor * k;
+ struct ggml_tensor * v;
+
+ if (wctx.params.flash_attn) {
+ k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_state,
+ (ggml_element_size(kv_self.k)*n_state)*(il*n_ctx + kv_head));
+
+ v = ggml_view_1d(ctx0, kv_self.v, n_tokens*n_state,
+ (ggml_element_size(kv_self.v)*n_state)*(il*n_ctx + kv_head));
+ } else {
+ Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_state, n_tokens));
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_state, (ggml_element_size(kv_self.k)*n_state)*(il*n_ctx + kv_head));
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_state,
- ( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_state + kv_head*ggml_element_size(kv_self.v));
+ k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_state,
+ (ggml_element_size(kv_self.k)*n_state)*(il*n_ctx + kv_head));
+
+ v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_state,
+ ( n_ctx)*ggml_element_size(kv_self.v),
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_state + kv_head*ggml_element_size(kv_self.v));
+ }
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
struct ggml_tensor * Q =
ggml_permute(ctx0,
- ggml_reshape_3d(ctx0, Qcur, n_state/n_head, n_head, n_tokens),
+ ggml_reshape_3d(ctx0, Qcur, n_state_head, n_head, n_tokens),
0, 2, 1, 3);
struct ggml_tensor * K =
ggml_view_3d(ctx0, kv_self.k,
- n_state/n_head, n_kv, n_head,
+ n_state_head, n_kv, n_head,
ggml_element_size(kv_self.k)*n_state,
- ggml_element_size(kv_self.k)*n_state/n_head,
+ ggml_element_size(kv_self.k)*n_state_head,
ggml_element_size(kv_self.k)*n_state*n_ctx*il);
- // K * Q
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ if (wctx.params.flash_attn) {
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_self.v,
+ n_state_head, n_kv, n_head,
+ ggml_element_size(kv_self.v)*n_state,
+ ggml_element_size(kv_self.v)*n_state_head,
+ ggml_element_size(kv_self.v)*n_state*n_ctx*il);
+
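+                // same 1.0f scale as the ggml_soft_max_ext call in the fallback branch below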
+ cur = ggml_flash_attn_ext(ctx0, Q, K, V, KQ_mask_f16, 1.0f, 0.0f);
+
+ cur = ggml_reshape_2d(ctx0, cur, n_state, n_tokens);
+ } else {
+ // K * Q
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
- struct ggml_tensor * KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, KQ_mask, 1.0f, 0.0f);
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, KQ_mask, 1.0f, 0.0f);
- struct ggml_tensor * V =
- ggml_view_3d(ctx0, kv_self.v,
- n_kv, n_state/n_head, n_head,
- n_ctx*ggml_element_size(kv_self.v),
- n_ctx*ggml_element_size(kv_self.v)*n_state/n_head,
- n_ctx*ggml_element_size(kv_self.v)*n_state*il);
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_self.v,
+ n_kv, n_state_head, n_head,
+ n_ctx*ggml_element_size(kv_self.v),
+ n_ctx*ggml_element_size(kv_self.v)*n_state_head,
+ n_ctx*ggml_element_size(kv_self.v)*n_state*il);
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
- cur = ggml_cpy(ctx0,
- KQV_merged,
- ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_tokens));
+ cur = ggml_cpy(ctx0,
+ KQV_merged,
+ ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_tokens));
+ }
}
// projection
Qcur,
layer.cross_attn_q_b);
- Qcur = ggml_scale(ctx0, Qcur, KQscale);
-
- // Kcross is already scaled
- struct ggml_tensor * Kcross =
- ggml_view_3d(ctx0, wstate.kv_cross.k,
- n_state/n_head, n_audio_ctx, n_head,
- ggml_element_size(wstate.kv_cross.k)*n_state,
- ggml_element_size(wstate.kv_cross.k)*n_state/n_head,
- ggml_element_size(wstate.kv_cross.k)*n_state*n_audio_ctx*il);
-
- //struct ggml_tensor * Vcross =
- // ggml_reshape_3d(ctx0,
- // ggml_view_1d(ctx0, wstate.kv_cross.v, n_audio_ctx*n_state, il*n_audio_ctx*ggml_element_size(wstate.kv_cross.v)*n_state),
- // n_state/n_head, n_head, n_audio_ctx);
-
- //struct ggml_tensor * V_trans =
- // ggml_cpy(ctx0,
- // ggml_permute(ctx0, Vcross, 1, 2, 0, 3),
- // ggml_new_tensor_3d(ctx0, Vcross->type, n_audio_ctx, n_state/n_head, n_head));
-
- struct ggml_tensor * V =
- ggml_view_3d(ctx0, wstate.kv_cross.v,
- n_audio_ctx, n_state/n_head, n_head,
- n_audio_ctx*ggml_element_size(wstate.kv_cross.v),
- n_audio_ctx*ggml_element_size(wstate.kv_cross.v)*n_state/n_head,
- n_audio_ctx*ggml_element_size(wstate.kv_cross.v)*n_state*il);
-
- // ------
-
struct ggml_tensor * Q =
ggml_permute(ctx0,
- ggml_reshape_3d(ctx0, Qcur, n_state/n_head, n_head, n_tokens),
+ ggml_reshape_3d(ctx0, Qcur, n_state_head, n_head, n_tokens),
0, 2, 1, 3);
- // K * Q
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, Kcross, Q);
-
- //struct ggml_tensor * KQ_scaled =
- // ggml_scale(ctx0,
- // KQ,
- // ggml_new_f32(ctx0, 1.0f/sqrt(float(n_state)/n_head))
- // );
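+            // Qcur is no longer pre-scaled here; Kcross was already scaled by pow(n_state_head, -0.25)
+            // in whisper_build_graph_cross, and the remaining factor is passed as KQscale to the attention below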
+ if (wctx.params.flash_attn) {
+ struct ggml_tensor * Kcross =
+ ggml_view_3d(ctx0, wstate.kv_cross.k,
+ n_state_head, n_audio_ctx_pad, n_head,
+ ggml_element_size(wstate.kv_cross.k)*n_state,
+ ggml_element_size(wstate.kv_cross.k)*n_state_head,
+ ggml_element_size(wstate.kv_cross.k)*n_state*n_audio_ctx_pad*il);
- // no masking for cross-attention
- //struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+ struct ggml_tensor * Vcross =
+ ggml_view_3d(ctx0, wstate.kv_cross.v,
+ n_state_head, n_audio_ctx_pad, n_head,
+ ggml_element_size(wstate.kv_cross.v)*n_state,
+ ggml_element_size(wstate.kv_cross.v)*n_state_head,
+ ggml_element_size(wstate.kv_cross.v)*n_state*n_audio_ctx_pad*il);
- struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ);
+ cur = ggml_flash_attn_ext(ctx0, Q, Kcross, Vcross, nullptr, KQscale, 0.0f);
- // [EXPERIMENTAL] Token-level timestamps with DTW
- if (wctx.params.dtw_token_timestamps) {
- if (wstate.aheads_masks.m[il] != nullptr) {
- struct ggml_tensor * aheads_KQs = ggml_reshape_2d(ctx0, KQ_soft_max, KQ_soft_max->ne[0] * KQ_soft_max->ne[1], KQ_soft_max->ne[2]);
- aheads_KQs = ggml_transpose(ctx0, aheads_KQs);
- aheads_KQs = ggml_cont(ctx0, aheads_KQs);
- aheads_KQs = ggml_mul_mat(ctx0, wstate.aheads_masks.m[il], aheads_KQs);
- aheads_KQs = ggml_transpose(ctx0, aheads_KQs);
- aheads_KQs = ggml_cont(ctx0, aheads_KQs);
- aheads_KQs = ggml_reshape_3d(ctx0, aheads_KQs, KQ_soft_max->ne[0], KQ_soft_max->ne[1], wstate.aheads_masks.m[il]->ne[1]);
- if (aheads_cross_QKs == NULL) {
- aheads_cross_QKs = aheads_KQs;
- } else {
- aheads_cross_QKs = ggml_concat(ctx0, aheads_cross_QKs, aheads_KQs);
+ cur = ggml_reshape_2d(ctx0, cur, n_state, n_tokens);
+ } else {
+ struct ggml_tensor * Kcross =
+ ggml_view_3d(ctx0, wstate.kv_cross.k,
+ n_state_head, n_audio_ctx, n_head,
+ ggml_element_size(wstate.kv_cross.k)*n_state,
+ ggml_element_size(wstate.kv_cross.k)*n_state_head,
+ ggml_element_size(wstate.kv_cross.k)*n_state*n_audio_ctx*il);
+
+ struct ggml_tensor * Vcross =
+ ggml_view_3d(ctx0, wstate.kv_cross.v,
+ n_audio_ctx, n_state_head, n_head,
+ n_audio_ctx*ggml_element_size(wstate.kv_cross.v),
+ n_audio_ctx*ggml_element_size(wstate.kv_cross.v)*n_state_head,
+ n_audio_ctx*ggml_element_size(wstate.kv_cross.v)*n_state*il);
+
+ // ------
+
+ // K * Q
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, Kcross, Q);
+
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_ext(ctx0, KQ, nullptr, KQscale, 0.0f);
+
+ // [EXPERIMENTAL] Token-level timestamps with DTW
+ if (wctx.params.dtw_token_timestamps) {
+ if (wstate.aheads_masks.m[il] != nullptr) {
+ struct ggml_tensor * aheads_KQs = ggml_reshape_2d(ctx0, KQ_soft_max, KQ_soft_max->ne[0] * KQ_soft_max->ne[1], KQ_soft_max->ne[2]);
+ aheads_KQs = ggml_transpose(ctx0, aheads_KQs);
+ aheads_KQs = ggml_cont(ctx0, aheads_KQs);
+ aheads_KQs = ggml_mul_mat(ctx0, wstate.aheads_masks.m[il], aheads_KQs);
+ aheads_KQs = ggml_transpose(ctx0, aheads_KQs);
+ aheads_KQs = ggml_cont(ctx0, aheads_KQs);
+ aheads_KQs = ggml_reshape_3d(ctx0, aheads_KQs, KQ_soft_max->ne[0], KQ_soft_max->ne[1], wstate.aheads_masks.m[il]->ne[1]);
+ if (aheads_cross_QKs == NULL) {
+ aheads_cross_QKs = aheads_KQs;
+ } else {
+ aheads_cross_QKs = ggml_concat(ctx0, aheads_cross_QKs, aheads_KQs);
+ }
}
}
- }
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, Vcross, KQ_soft_max);
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
- // cur = KQV_merged.contiguous().view(n_state, n_tokens)
- cur = ggml_cpy(ctx0,
- KQV_merged,
- ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_tokens));
+ cur = ggml_cpy(ctx0,
+ KQV_merged,
+ ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_tokens));
+ }
}
// projection
return false;
}
- kv_self.n = whisper_kv_cache_cell_max(kv_self);
+ const uint32_t pad = whisper_kv_cache_get_padding(wctx);
+ kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(whisper_kv_cache_cell_max(kv_self), pad)));
+
//kv_self.n = std::min((int32_t) hparams.n_text_ctx, std::max(32, whisper_kv_cache_cell_max(kv_self)));
//printf("n_tokens = %5d, kv_self.head = %5d, kv_self.n = %5d, seq_id = %5d\n", batch.n_tokens, kv_self.head, kv_self.n, batch.seq_id[0][0]);
}
struct ggml_tensor * KQ_mask = ggml_graph_get_tensor(gf, "KQ_mask");
auto & kv_self = wstate.kv_self;
- const int32_t n_kv = kv_self.n;
- wstate.inp_mask.resize(n_kv*n_tokens);
+ const int32_t n_kv = kv_self.n;
+
+ wstate.inp_mask.resize(ggml_nelements(KQ_mask));
float * data = wstate.inp_mask.data();
memset(data, 0, ggml_nbytes(KQ_mask));
}
}
}
+
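+        // rows i >= n_tokens exist only because KQ_mask is padded to GGML_KQ_MASK_PAD - mask them out entirely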
+ for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+ for (int j = 0; j < n_kv; ++j) {
+ data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+ }
+ }
}
ggml_backend_tensor_set(KQ_mask, wstate.inp_mask.data(), 0, ggml_nelements(KQ_mask)*sizeof(float));
logits = gf->nodes[gf->n_nodes - 1];
- if (!ggml_graph_compute_helper(wstate.backend, gf, n_threads)) {
+ if (!ggml_graph_compute_helper(wctx.backend, gf, n_threads)) {
return false;
}
}
whisper_state * state = new whisper_state;
- state->backend = whisper_backend_init(ctx->params);
- if (!state->backend) {
- WHISPER_LOG_ERROR("%s: whisper_backend_init() failed\n", __func__);
- whisper_free_state(state);
- return nullptr;
- }
-
// at this point, we don't know yet how many decoders will be used, so we overallocate 3x ctx
// in theory, there can be a case where this is not enough, but in practice it should always be enough
const int factor = 3;
- if (!kv_cache_init(ctx->model.hparams, state->kv_self, ctx->backend, ctx->itype, factor*ctx->model.hparams.n_text_ctx)) {
+ if (!kv_cache_init(state->kv_self, ctx->backend, ctx->itype,
+ ctx->model.hparams.n_text_state,
+ ctx->model.hparams.n_text_layer,
+ GGML_PAD(ctx->model.hparams.n_text_ctx, 256)*factor)) {
WHISPER_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
whisper_free_state(state);
return nullptr;
WHISPER_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1e6);
}
- if (!kv_cache_init(ctx->model.hparams, state->kv_cross, ctx->backend, ctx->itype, ctx->model.hparams.n_audio_ctx)) {
+ if (!kv_cache_init(state->kv_cross, ctx->backend, ctx->itype,
+ ctx->model.hparams.n_text_state,
+ ctx->model.hparams.n_text_layer,
+ GGML_PAD(ctx->model.hparams.n_audio_ctx, 256))) {
WHISPER_LOG_ERROR("%s: kv_cache_init() failed for cross-attention cache\n", __func__);
whisper_free_state(state);
return nullptr;
WHISPER_LOG_INFO("%s: kv cross size = %7.2f MB\n", __func__, memory_size / 1e6);
}
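+    // kv_pad buffers the padded K/V of a single encoder layer at a time for the flash-attention path
+    // (hence the single layer and n_audio_state here)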
+ if (!kv_cache_init(state->kv_pad, ctx->backend, ctx->itype,
+ ctx->model.hparams.n_audio_state,
+ 1,
+ GGML_PAD(ctx->model.hparams.n_audio_ctx, 256))) {
+        WHISPER_LOG_ERROR("%s: kv_cache_init() failed for the padding cache\n", __func__);
+ whisper_free_state(state);
+ return nullptr;
+ }
+
+ {
+ const size_t memory_size = ggml_nbytes(state->kv_pad.k) + ggml_nbytes(state->kv_pad.v);
+ WHISPER_LOG_INFO("%s: kv pad size = %7.2f MB\n", __func__, memory_size / 1e6);
+ }
+
// [EXPERIMENTAL] Token-level timestamps with DTW
if (ctx->params.dtw_token_timestamps) {
if (!aheads_masks_init(ctx->params, ctx->model.hparams, state->aheads_masks, ctx->backend)) {
struct whisper_context_params whisper_context_default_params() {
struct whisper_context_params result = {
/*.use_gpu =*/ true,
+ /*.flash_attn =*/ false,
/*.gpu_device =*/ 0,
/*.dtw_token_timestamps =*/ false,
struct whisper_context * whisper_init_with_params_no_state(struct whisper_model_loader * loader, struct whisper_context_params params) {
ggml_time_init();
+ if (params.flash_attn && params.dtw_token_timestamps) {
+ WHISPER_LOG_WARN("%s: dtw_token_timestamps is not supported with flash_attn - disabling\n", __func__);
+ params.dtw_token_timestamps = false;
+ }
+
+ WHISPER_LOG_INFO("%s: use gpu = %d\n", __func__, params.use_gpu);
+ WHISPER_LOG_INFO("%s: flash attn = %d\n", __func__, params.flash_attn);
+ WHISPER_LOG_INFO("%s: gpu_device = %d\n", __func__, params.gpu_device);
+ WHISPER_LOG_INFO("%s: dtw = %d\n", __func__, params.dtw_token_timestamps);
+
whisper_context * ctx = new whisper_context;
ctx->params = params;
if (state) {
kv_cache_free(state->kv_self);
kv_cache_free(state->kv_cross);
+ kv_cache_free(state->kv_pad);
#ifdef WHISPER_USE_COREML
if (state->ctx_coreml != nullptr) {
ggml_gallocr_free(state->alloc_cross.alloc);
ggml_gallocr_free(state->alloc_decode.alloc);
- ggml_backend_free(state->backend);
-
// [EXPERIMENTAL] Token-level timestamps with DTW
aheads_masks_free(state->aheads_masks);