Fix: `sentencepiece` tokenizers with added tokens failed on an over-strict assertion

Tokens added on top of a `sentencepiece` vocabulary are neither byte tokens nor control tokens, so the token-to-text conversion fell through to `GGML_ASSERT(false)` and aborted. The change below stops asserting and instead suppresses such tokens, the same way control tokens are handled:
```diff
 buf[0] = llama_token_to_byte(model->vocab, token);
 return 1;
 } else {
-GGML_ASSERT(false);
+// TODO: for now we accept all unsupported token types,
+// suppressing them like CONTROL tokens.
+// GGML_ASSERT(false);
 }
 break;
 }
 } else if (llama_is_control_token(model->vocab, token)) {
 ;
 } else {
-GGML_ASSERT(false);
+// TODO: for now we accept all unsupported token types,
+// suppressing them like CONTROL tokens.
+// GGML_ASSERT(false);
 }
 break;
 }
```
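
For context, here is a minimal, self-contained sketch of the dispatch this diff changes. The names and types are simplified stand-ins, not the actual llama.cpp implementation: byte tokens emit a single raw byte, control tokens emit nothing, and any other type (such as tokens added on top of a `sentencepiece` vocabulary) is now also suppressed instead of tripping `GGML_ASSERT(false)`.

```cpp
#include <cstdio>
#include <string>

// Simplified stand-in for llama.cpp's token classification (hypothetical names).
enum token_type { TOKEN_NORMAL, TOKEN_BYTE, TOKEN_CONTROL, TOKEN_USER_DEFINED };

// Append the textual form of one token to `out`.
static void append_token_text(token_type type, unsigned char byte_val,
                              const std::string & piece, std::string & out) {
    switch (type) {
        case TOKEN_NORMAL:
            out += piece;            // regular vocab entry: copy its text
            break;
        case TOKEN_BYTE:
            out += (char) byte_val;  // byte fallback token: emit the raw byte
            break;
        case TOKEN_CONTROL:
            break;                   // control tokens (BOS/EOS/...) produce no text
        default:
            // Previously this path aborted via GGML_ASSERT(false).
            // Added/user-defined tokens now fall through here and are
            // suppressed like control tokens.
            break;
    }
}

int main() {
    std::string out;
    append_token_text(TOKEN_NORMAL,       0, "hello", out);
    append_token_text(TOKEN_USER_DEFINED, 0, "",      out); // no longer asserts
    append_token_text(TOKEN_BYTE,       '!', "",      out);
    std::printf("%s\n", out.c_str()); // prints: hello!
    return 0;
}
```

The trade-off, as the TODO in the diff notes, is that unsupported token types now silently contribute no text rather than crashing the process; the comment marks this as a temporary choice until those types are handled properly.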