From: Daniel Bevenius
Date: Fri, 9 Aug 2024 06:32:23 +0000 (+0200)
Subject: llama : fix typo in llama_tensor_get_type comment [no ci] (#8937)
X-Git-Tag: upstream/0.0.4488~934
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=6f6496bb0999d1bce5daff0cfc55ceb0dd13c888;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix typo in llama_tensor_get_type comment [no ci] (#8937)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index 68512d2e..be6dbf88 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -15304,7 +15304,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
     auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
         if (n_expert > 1) {
-            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but iccasionally randomly
+            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
             // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
             // for getting the current layer as I initially thought, and we need to resort to parsing the
             // tensor name.
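
For context, the comment being corrected refers to recovering the layer index by parsing the tensor name rather than from a running counter. Below is a minimal standalone sketch of that idea; the helper name layer_from_tensor_name and the example tensor names are assumptions for illustration, not the actual llama.cpp code surrounding this comment.

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Illustrative sketch: extract the layer index from a GGUF-style tensor
    // name such as "blk.17.ffn_down.weight". Because expert FFN tensors are
    // not stored consecutively, a running counter divided by n_expert is not
    // reliable; the name itself is the source of truth for the layer.
    static int layer_from_tensor_name(const char * name) {
        int i_layer = -1;
        if (std::sscanf(name, "blk.%d.", &i_layer) != 1) {
            throw std::runtime_error(
                std::string("failed to determine layer for tensor ") + name);
        }
        return i_layer;
    }

    int main() {
        // Two tensors that may sit next to each other in the file while
        // belonging to different layers (hypothetical example names).
        std::printf("%d\n", layer_from_tensor_name("blk.17.ffn_down.weight")); // 17
        std::printf("%d\n", layer_from_tensor_name("blk.3.ffn_down.weight"));  // 3
        return 0;
    }

Parsing the "blk.%d." prefix is robust to arbitrary interleaving of expert tensors in the model file, which is why the comment says name parsing is needed instead of dividing i_ffn_down by n_expert.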