mirror of https://github.com/ggerganov/llama.cpp.git
infill : assert prefix/suffix tokens + remove old space logic (#8351)

commit 6f0dbf6ab0
parent ffd00797d8
@@ -204,21 +204,17 @@ int main(int argc, char ** argv) {
     GGML_ASSERT(llama_add_eos_token(model) != 1);
     LOG("add_bos: %d\n", add_bos);
 
-    bool suff_rm_leading_spc = params.escape;
-    if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
-        params.input_suffix.erase(0, 1);
-        suff_rm_leading_spc = false;
-    }
     std::vector<llama_token> embd_inp;
     std::vector<llama_token> embd_end;
     std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
     std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
-    const int space_token = 29871;
-    if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
-        inp_sfx.erase(inp_sfx.begin());
-    }
+
+    GGML_ASSERT(llama_token_prefix(model) >= 0);
+    GGML_ASSERT(llama_token_suffix(model) >= 0);
+
     inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
     inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
 
     embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
     embd_end = params.spm_infill ? inp_pfx : inp_sfx;
     if (add_bos) {
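For context, this hunk only shows where the prompt pieces are built and where the new asserts land; the surrounding example code then concatenates the two blocks and appends the middle token. Below is a minimal sketch of the full assembly under the FIM token API of this llama.cpp version (llama_token_prefix/suffix/middle). The helper name build_infill_prompt is hypothetical — infill.cpp does all of this inline in main().

#include <string>
#include <vector>
#include "common.h"
#include "llama.h"

// Hypothetical helper summarizing the infill prompt layout; the actual
// example performs these steps inline.
static std::vector<llama_token> build_infill_prompt(
        llama_context * ctx, llama_model * model,
        const std::string & prefix, const std::string & suffix,
        bool spm_infill, bool add_bos) {
    // Tokenize without special tokens; FIM markers are inserted manually.
    std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, prefix, false);
    std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, suffix, false);

    // New in this commit: fail fast if the vocab defines no FIM tokens,
    // instead of emitting a silently broken prompt.
    GGML_ASSERT(llama_token_prefix(model) >= 0);
    GGML_ASSERT(llama_token_suffix(model) >= 0);

    inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
    inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

    // SPM-style infill orders the blocks suffix-first:
    //   <SFX>suffix<PFX>prefix<MID>   (spm_infill)
    //   <PFX>prefix<SFX>suffix<MID>   (default)
    std::vector<llama_token> embd_inp = spm_infill ? inp_sfx : inp_pfx;
    std::vector<llama_token> embd_end = spm_infill ? inp_pfx : inp_sfx;
    if (add_bos) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }
    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    // Assumes the vocab also defines a middle token; the real example
    // checks llama_token_middle(model) >= 0 at runtime before appending.
    embd_inp.push_back(llama_token_middle(model));
    return embd_inp;
}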
@@ -516,19 +512,14 @@ int main(int argc, char ** argv) {
                 string_process_escapes(params.input_prefix);
                 string_process_escapes(params.input_suffix);
             }
-            suff_rm_leading_spc = params.escape;
-            if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
-                params.input_suffix.erase(0, 1);
-                suff_rm_leading_spc = false;
-            }
+
             // tokenize new prefix and suffix
             std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
             std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
-            if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
-                inp_sfx.erase(inp_sfx.begin());
-            }
+
             inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
             inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
 
             embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
             embd_end = params.spm_infill ? inp_pfx : inp_sfx;
             if (add_bos) {
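This second hunk deletes the same suff_rm_leading_spc heuristic from the interactive path. The dropped constant space_token = 29871 is the SentencePiece word-boundary token "▁" of the original LLaMA vocab, so the old space-stripping logic was only ever valid for LLaMA-style tokenizers. A short sketch of the pre-flight check that conceptually replaces it — the helper name model_supports_infill is hypothetical; the example itself just asserts inline at startup:

#include <cstdio>
#include "llama.h"

// Hypothetical helper: a vocab advertises FIM support by defining
// prefix and suffix special tokens; a negative id means "not present".
static bool model_supports_infill(const llama_model * model) {
    return llama_token_prefix(model) >= 0 &&
           llama_token_suffix(model) >= 0;
}

// Usage sketch: fail fast instead of mangling the suffix text.
//   if (!model_supports_infill(model)) {
//       fprintf(stderr, "error: model does not support infill\n");
//       return 1;
//   }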