import sys
import struct
import numpy as np
import torch
-
from sentencepiece import SentencePieceProcessor
if len(sys.argv) < 3:
# write the 32000-entry tokenizer vocabulary
for i in range(32000):
- # TODO: this is probably wrong - not sure how this tokenizer works
- text = tokenizer.decode([29889, i]).encode('utf-8')
- # remove the first byte (it's always '.')
- text = text[1:]
- fout.write(struct.pack("i", len(text)))
- fout.write(text)
+ if tokenizer.is_unknown(i):
+     # "<unk>" token (written out as U+2047 DOUBLE QUESTION MARK)
+     text = " \u2047 ".encode("utf-8")
+     fout.write(struct.pack("i", len(text)))
+     fout.write(text)
+ elif tokenizer.is_control(i):
+     # "<s>"/"</s>" tokens: write a zero-length entry
+     fout.write(struct.pack("i", 0))
+ elif tokenizer.is_byte(i):
+     # "<0xXX>" byte-fallback tokens (which may be invalid UTF-8)
+     piece = tokenizer.id_to_piece(i)
+     if len(piece) != 6:
+         print("Invalid token: " + piece)
+         sys.exit(1)
+     byte_value = int(piece[3:-1], 16)
+     fout.write(struct.pack("i", 1))
+     fout.write(struct.pack("B", byte_value))
+ else:
+     # normal token: SentencePiece uses U+2581 (LOWER ONE EIGHTH BLOCK) to represent spaces
+     text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+     fout.write(struct.pack("i", len(text)))
+     fout.write(text)
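# Aside, not part of the patch: a minimal sketch of reading these records back,
# assuming a binary file object `fin` positioned at the first token entry and the
# same 32000-token vocabulary. Each entry is a 4-byte native int length followed
# by that many bytes (control tokens write length 0, byte tokens a single raw byte).
def read_vocab(fin, n_vocab=32000):
    vocab = []
    for _ in range(n_vocab):
        (length,) = struct.unpack("i", fin.read(4))
        vocab.append(fin.read(length).decode("utf-8", errors="replace"))
    return vocab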
for k, v in model.items():
    name = k
break;
}
}
+
+ // reset color to default if there is no pending user input
+ if (!input_noecho && params.use_color && embd_inp.size() == input_consumed) {
+     printf(ANSI_COLOR_RESET);
+ }
}
// display text
for (auto id : embd) {
    printf("%s", vocab.id_to_token[id].c_str());
}
- // reset color to default if we there is no pending user input
- if (params.use_color && embd_inp.size() <= input_consumed) {
-     printf(ANSI_COLOR_RESET);
- }
fflush(stdout);
}