    model_arch = gguf.MODEL_ARCH.PHI3

    def set_vocab(self):
+        # Phi-4 model uses GPT2Tokenizer
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                tokenizer_class = tokenizer_config_json['tokenizer_class']
+                if tokenizer_class == 'GPT2Tokenizer':
+                    return self._set_vocab_gpt2()
+
        from sentencepiece import SentencePieceProcessor

        tokenizer_path = self.dir_model / 'tokenizer.model'
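As a quick way to see which branch the converter will take for a given checkpoint, here is a minimal standalone sketch of the same detection logic; the directory name is a placeholder and detect_phi_tokenizer is a hypothetical helper, not part of the conversion script:

import json
from pathlib import Path

def detect_phi_tokenizer(model_dir: str) -> str:
    # Phi-4 ships a tokenizer_config.json declaring "tokenizer_class": "GPT2Tokenizer";
    # Phi-3 checkpoints instead provide a SentencePiece tokenizer.model.
    config_path = Path(model_dir) / "tokenizer_config.json"
    if config_path.is_file():
        with open(config_path, "r", encoding="utf-8") as f:
            if json.load(f).get("tokenizer_class") == "GPT2Tokenizer":
                return "gpt2"
    return "sentencepiece"

print(detect_phi_tokenizer("./phi-4"))  # expected "gpt2" for a Phi-4 download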
        self.gguf_writer.add_rope_dimension_count(rope_dims)
        self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
        self.gguf_writer.add_file_type(self.ftype)
-        self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))
+        sliding_window = self.hparams.get("sliding_window")
+        # use a sliding_window value of 0 to distinguish Phi-4 from other PHI3 models
+        if sliding_window is None:
+            sliding_window = 0
+        self.gguf_writer.add_sliding_window(sliding_window)

    def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
        n_embd = self.find_hparam(["hidden_size", "n_embd"])
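The sentinel is the key design choice: the sliding-window metadata field is always written, so a config.json without "sliding_window" (Phi-4) is stored as 0 and later read back as hparams.n_swa == 0 on the C++ side. A small sketch of that mapping; the example window size is illustrative only:

def resolve_sliding_window(hparams: dict) -> int:
    # Mirrors the converter change above: missing key -> 0 ("no SWA") sentinel.
    sliding_window = hparams.get("sliding_window")
    return 0 if sliding_window is None else sliding_window

print(resolve_sliding_window({"sliding_window": 2047}))  # Phi-3-style config
print(resolve_sliding_window({}))                        # Phi-4-style config -> 0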
        struct ggml_tensor * inp_pos = build_inp_pos();

        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+        struct ggml_tensor * KQ_mask = nullptr;
+        if (hparams.n_swa == 0) {
+            // Phi-4 doesn't use sliding window attention
+            KQ_mask = build_inp_KQ_mask();
+        } else {
+            KQ_mask = build_inp_KQ_mask_swa();
+        }
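For intuition, a conceptual pure-Python sketch (not llama.cpp code) of what the two mask builders express: the plain causal mask lets query position i attend to every key position j <= i, while the sliding-window variant also drops keys more than n_swa positions back (the exact boundary convention is simplified here). Phi-4 stores n_swa == 0, so it needs the plain causal mask.

def causal_mask(n_tokens: int) -> list[list[bool]]:
    # True means "query position i may attend to key position j".
    return [[j <= i for j in range(n_tokens)] for i in range(n_tokens)]

def sliding_window_mask(n_tokens: int, n_swa: int) -> list[list[bool]]:
    # Same causal rule, but keys older than n_swa positions are masked out.
    return [[j <= i and i - j < n_swa for j in range(n_tokens)] for i in range(n_tokens)]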
        for (int il = 0; il < n_layer; ++il) {
            auto residual = inpL;

                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                        Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

            if (il == n_layer - 1) {