llama : fix typo in llama_tensor_get_type comment [no ci] (#8937)
author    Daniel Bevenius <redacted>
Fri, 9 Aug 2024 06:32:23 +0000 (08:32 +0200)
committer GitHub <redacted>
Fri, 9 Aug 2024 06:32:23 +0000 (09:32 +0300)
src/llama.cpp

index 68512d2ef70bbec26c8b6ee12653e188b62db073..be6dbf88a779047f60d9b96f3a40edb38422a220 100644
@@ -15304,7 +15304,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
     auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
         if (n_expert > 1) {
-            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but iccasionally randomly
+            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
             // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
             // for getting the current layer as I initially thought, and we need to resort to parsing the
             // tensor name.
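
For context, the comment above motivates recovering the layer index by parsing the tensor name rather than by dividing a running tensor counter by n_expert. Below is a minimal, self-contained sketch of that name-parsing approach; it assumes the "blk.<n>." prefix used by llama.cpp's GGUF tensor names (e.g. "blk.17.ffn_down.weight"), and the helper name and error messages are illustrative, not taken from the file:

    // Sketch: recover the layer index from a tensor name such as
    // "blk.17.ffn_down.weight" instead of deriving it arithmetically.
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    static int layer_from_tensor_name(const char * name, int n_layer) {
        int i_layer = -1;
        // "blk.%d." matches the literal "blk." prefix, then reads the layer index
        if (sscanf(name, "blk.%d.", &i_layer) != 1) {
            throw std::runtime_error(std::string("failed to parse layer from tensor name: ") + name);
        }
        if (i_layer < 0 || i_layer >= n_layer) {
            throw std::runtime_error(std::string("layer index out of range for tensor: ") + name);
        }
        return i_layer;
    }

    int main() {
        // e.g. an FFN down-projection tensor from layer 17 of a 32-layer model
        printf("%d\n", layer_from_tensor_name("blk.17.ffn_down.weight", 32)); // prints 17
    }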