gpt-2 : fix broken prompt due to recent experiments
author    Georgi Gerganov <redacted>
          Sun, 8 Jan 2023 18:28:38 +0000 (20:28 +0200)
committer Georgi Gerganov <redacted>
          Sun, 8 Jan 2023 18:28:38 +0000 (20:28 +0200)
No idea why I committed that!?

examples/gpt-2/main.cpp

index 6507ec2e2f62df0fe4469da617c84eacf970dec3..333d93b8b5e62cc6da13379eb7ba1502bb2f429c 100644
@@ -496,7 +496,6 @@ bool gpt2_eval(
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
-#if 0
             // KQ_masked = mask_past(KQ_scaled)
             // [n_past + N, N, 12]
             struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
@@ -504,15 +503,6 @@ bool gpt2_eval(
             // KQ = soft_max(KQ_masked)
             // [n_past + N, N, 12]
             struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
-#else
-            // KQ_masked = mask_past(KQ_scaled)
-            // [n_past + N, N, 12]
-            //struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
-
-            // KQ = soft_max(KQ_masked)
-            // [n_past + N, N, 12]
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_scaled);
-#endif
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             // [n_past + N, 64, 12]