Commit 6f0dbf6

infill : assert prefix/suffix tokens + remove old space logic (#8351)

1 parent ffd0079 · commit 6f0dbf6

File tree

2 files changed (+9, -18 lines):

  common/log.h
  examples/infill/infill.cpp

common/log.h

Lines changed: 1 addition & 1 deletion

@@ -630,7 +630,7 @@ inline std::string LOG_TOKENS_TOSTR_PRETTY(const C & ctx, const T & tokens)
     buf << "[ ";
 
     bool first = true;
-    for (const auto &token : tokens)
+    for (const auto & token : tokens)
     {
         if (!first) {
             buf << ", ";

examples/infill/infill.cpp

Lines changed: 8 additions & 17 deletions

@@ -204,21 +204,17 @@ int main(int argc, char ** argv) {
     GGML_ASSERT(llama_add_eos_token(model) != 1);
     LOG("add_bos: %d\n", add_bos);
 
-    bool suff_rm_leading_spc = params.escape;
-    if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
-        params.input_suffix.erase(0, 1);
-        suff_rm_leading_spc = false;
-    }
     std::vector<llama_token> embd_inp;
     std::vector<llama_token> embd_end;
     std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
     std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
-    const int space_token = 29871;
-    if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
-        inp_sfx.erase(inp_sfx.begin());
-    }
+
+    GGML_ASSERT(llama_token_prefix(model) >= 0);
+    GGML_ASSERT(llama_token_suffix(model) >= 0);
+
     inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
     inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
+
     embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
     embd_end = params.spm_infill ? inp_pfx : inp_sfx;
     if (add_bos) {

@@ -516,19 +512,14 @@ int main(int argc, char ** argv) {
                 string_process_escapes(params.input_prefix);
                 string_process_escapes(params.input_suffix);
             }
-            suff_rm_leading_spc = params.escape;
-            if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
-                params.input_suffix.erase(0, 1);
-                suff_rm_leading_spc = false;
-            }
+
             // tokenize new prefix and suffix
             std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
             std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
-            if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
-                inp_sfx.erase(inp_sfx.begin());
-            }
+
             inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
             inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
+
             embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
             embd_end = params.spm_infill ? inp_pfx : inp_sfx;
             if (add_bos) {
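For reference, here is a minimal sketch (not part of the commit) of the prompt construction that infill.cpp is left with after this change: tokenize the prefix and suffix, assert that the vocabulary actually defines fill-in-the-middle special tokens, and prepend them, instead of the removed heuristic that stripped the hard-coded space token 29871. It assumes the llama.cpp C/common API of this period (::llama_tokenize, llama_token_prefix/suffix/bos, GGML_ASSERT); the helper name build_infill_prompt is hypothetical and does not exist in the tree.

#include "common.h"   // ::llama_tokenize
#include "llama.h"    // llama_token_prefix/suffix/bos, GGML_ASSERT via ggml.h

#include <string>
#include <vector>

// Hypothetical helper illustrating the new infill prompt flow.
static std::vector<llama_token> build_infill_prompt(
        llama_context * ctx, const llama_model * model,
        const std::string & prefix, const std::string & suffix,
        bool spm_infill, bool add_bos) {
    std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, prefix, false);
    std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, suffix, false);

    // fail fast if the model's vocab has no FIM prefix/suffix special tokens,
    // rather than silently building a malformed prompt
    GGML_ASSERT(llama_token_prefix(model) >= 0);
    GGML_ASSERT(llama_token_suffix(model) >= 0);

    inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
    inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

    // SPM-style infill places the suffix block before the prefix block
    std::vector<llama_token> embd_inp = spm_infill ? inp_sfx : inp_pfx;
    std::vector<llama_token> embd_end = spm_infill ? inp_pfx : inp_sfx;

    if (add_bos) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }
    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    // the infill example then appends the FIM middle token before decoding
    return embd_inp;
}

The same steps run twice in the example, once for the initial prompt and once when the user supplies a new prefix/suffix interactively, which is why the two hunks above change in lockstep.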
