-#/bin/bash
+#!/bin/bash
#
# sample usage:
#
    model_f16="${path_models}/ggml-model-f16.gguf"
-    (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?</s><s>hi\nwhat is panda?</s><s>it's a bear\nwhat is panda?</s><s>The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log
+    # for this model, the SEP token is "</s>"
+    (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?</s></s>hi\nwhat is panda?</s></s>it's a bear\nwhat is panda?</s></s>The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." --pooling rank --embd-normalize -1 --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log
    # sample output
    # rerank score 0: 0.029
    check_score "rerank score 0" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 0")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
    check_score "rerank score 1" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 1")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
-    check_score "rerank score 2" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 2")" "0.10" "0.15" | tee -a $OUT/${ci}-rk-f16.log
+    check_score "rerank score 2" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 2")" "0.10" "0.30" | tee -a $OUT/${ci}-rk-f16.log
    set +e
}
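
With --pooling rank, llama-embedding scores each query/document pair and prints the "rerank score N: ..." lines that check_score matches above. A minimal sketch of where such a score comes from in the public API; the helper name is ours, and it assumes a context created with embeddings enabled and pooling_type = LLAMA_POOLING_TYPE_RANK, after the pair has already been decoded:

#include "llama.h"

// hypothetical helper: with rank pooling, the per-sequence output of
// llama_get_embeddings_seq() is a single float, the relevance score
static float rerank_score(llama_context * ctx, llama_seq_id seq_id) {
    const float * score = llama_get_embeddings_seq(ctx, seq_id);
    return score == nullptr ? 0.0f : score[0];
}
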
        return iparams;
    }
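+    // the rerank prompt is built as [BOS]query[EOS][SEP]doc[EOS] (see the server
+    // change below), so reranking needs all three of these special tokens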
+    if (params.reranking) {
+        bool ok = true;
+
+        if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (!ok) {
+            llama_free_model(model);
+
+            return iparams;
+        }
+    }
+
    auto cparams = llama_context_params_from_gpt_params(params);
    llama_context * lctx = llama_new_context_with_model(model, cparams);
        if (cvec.n_embd == -1) {
            llama_free(lctx);
            llama_free_model(model);
+
            return iparams;
        }
        if (err) {
            llama_free(lctx);
            llama_free_model(model);
+
            return iparams;
        }
    }
        llama_lora_adapters_apply(lctx, iparams.lora_adapters);
    }
-    if (params.sparams.ignore_eos && llama_token_eos(model) == -1) {
+    if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
        LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__);
        params.sparams.ignore_eos = false;
    }
    iparams.model   = model;
    iparams.context = lctx;
+
    return iparams;
}
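
For orientation, the caller consumes this result roughly as follows. A minimal sketch, assuming the gpt_params-era common.h names (llama_init_result and llama_init_from_gpt_params) that this hunk does not show:

#include "common.h" // assumed to declare gpt_params, llama_init_result, llama_init_from_gpt_params

static bool example_init(gpt_params & params) {
    // every failure path above (including the new reranking token check)
    // returns iparams with null model/context pointers
    llama_init_result iparams = llama_init_from_gpt_params(params);
    if (iparams.model == nullptr || iparams.context == nullptr) {
        return false;
    }
    // ... use iparams.model and iparams.context ...
    return true;
}
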
                continue;
            }
-            // prompt: <s>query</s><s>doc</s>
+            // prompt: [BOS]query[EOS][SEP]doc[EOS]
            prompt_tokens.clear();
            prompt_tokens.push_back(llama_token_bos(model));
            {
                const auto part = tokenize(slot.prompt[0], false);
                prompt_tokens.insert(prompt_tokens.end(), part.begin(), part.end());
            }
            prompt_tokens.push_back(llama_token_eos(model));
-            prompt_tokens.push_back(llama_token_bos(model));
+            prompt_tokens.push_back(llama_token_sep(model));
            {
                const auto part = tokenize(slot.prompt[1], false);
                prompt_tokens.insert(prompt_tokens.end(), part.begin(), part.end());
            }
            prompt_tokens.push_back(llama_token_eos(model));
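
Pulled out of the server slot plumbing, the same assembly can be sketched as a self-contained helper (the function name is ours, not part of the change), assuming the query and document are already tokenized:

#include "llama.h"

#include <vector>

// hypothetical helper: assemble the rerank prompt [BOS]query[EOS][SEP]doc[EOS]
static std::vector<llama_token> build_rerank_prompt(
        const llama_model * model,
        const std::vector<llama_token> & query,
        const std::vector<llama_token> & doc) {
    std::vector<llama_token> out;
    out.reserve(query.size() + doc.size() + 4);
    out.push_back(llama_token_bos(model));
    out.insert(out.end(), query.begin(), query.end());
    out.push_back(llama_token_eos(model));
    out.push_back(llama_token_sep(model)); // the fix: SEP here, not a second BOS
    out.insert(out.end(), doc.begin(), doc.end());
    out.push_back(llama_token_eos(model));
    return out;
}

Centralizing the ordering in one place keeps the BOS/EOS/SEP sequence testable outside the server.
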
    id special_bos_id  = 1;
    id special_eos_id  = 2;
    id special_unk_id  = 0;
-    id special_sep_id  = -1;
-    id special_pad_id  = -1;
-    id special_cls_id  = -1;
-    id special_mask_id = -1;
+    id special_sep_id  = LLAMA_TOKEN_NULL;
+    id special_pad_id  = LLAMA_TOKEN_NULL;
+    id special_cls_id  = LLAMA_TOKEN_NULL;
+    id special_mask_id = LLAMA_TOKEN_NULL;
    id linefeed_id = 13;
-    id special_prefix_id = -1;
-    id special_suffix_id = -1;
-    id special_middle_id = -1;
-    id special_eot_id    = -1; // TODO: move above after "eos_id", and here add "file separator" token
-    id special_eom_id    = -1;
+    id special_prefix_id = LLAMA_TOKEN_NULL;
+    id special_suffix_id = LLAMA_TOKEN_NULL;
+    id special_middle_id = LLAMA_TOKEN_NULL;
+    id special_eot_id    = LLAMA_TOKEN_NULL; // TODO: move above after "eos_id", and here add "file separator" token
+    id special_eom_id    = LLAMA_TOKEN_NULL;
    // set of all tokens that cause "end of generation"
    std::set<id> special_eog_ids;
    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
-    llama_token dec_start_token_id = -1;
+    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
    enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type    rope_type    = LLAMA_ROPE_TYPE_NONE;
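
The -1 to LLAMA_TOKEN_NULL change is purely cosmetic at the value level: llama.h defines the sentinel as -1, so all existing comparisons keep working and only the intent ("no such token") becomes explicit. A tiny sketch:

#include "llama.h"

#include <cstdio>

int main() {
    // LLAMA_TOKEN_NULL expands to -1, bit-identical to the old literal
    llama_token sep = LLAMA_TOKEN_NULL;
    if (sep == LLAMA_TOKEN_NULL) {
        std::printf("SEP token not set (id = %d)\n", (int) sep);
    }
    return 0;
}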