convert : fix set_vocab_sentencepiece (#6866)

* convert : fix set_vocab_sentencepiece

* Update convert-hf-to-gguf.py
Georgi Gerganov 2024-05-18 08:46:20 +03:00 committed by GitHub
parent 05834841dc
commit b49a13dd2f
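
Before this change, set_vocab_sentencepiece built the token, score, and type lists by appending, so an entry's position depended on iteration order rather than on its token id; entries from added_tokens.json that declare explicit, possibly non-contiguous ids could land in the wrong slots, and ids at or beyond vocab_size grew the lists past their intended size. The fix pre-sizes the lists to vocab_size and writes every entry at its declared id. A minimal, self-contained sketch of the difference (all names and values below are hypothetical):

sp_vocab_size = 4                    # pieces in the hypothetical SentencePiece model
added_tokens = {"<custom>": 6}       # added_tokens.json maps token text -> explicit id
vocab_size = 8                       # hparams vocab_size

# Old approach: append in iteration order -- "<custom>" lands at index 4, not 6.
tokens_old = [f"tok{i}".encode("utf-8") for i in range(sp_vocab_size)]
tokens_old.append("<custom>".encode("utf-8"))
print(tokens_old.index(b"<custom>"))   # 4 -- wrong slot

# New approach: pre-fill every slot, then write each token at its declared id.
tokens_new = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
for i in range(sp_vocab_size):
    tokens_new[i] = f"tok{i}".encode("utf-8")
for text, token_id in added_tokens.items():
    if token_id >= vocab_size:
        continue                       # the real code logs a warning and skips
    tokens_new[token_id] = text.encode("utf-8")
print(tokens_new.index(b"<custom>"))   # 6 -- matches its declared id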

convert-hf-to-gguf.py

@@ -573,6 +573,10 @@ class Model:
 
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
+        tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+        scores: list[float] = [-10000.0] * vocab_size
+        toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
         for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
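
The three added lines pre-size the vocab arrays so every id in [0, vocab_size) has a defined placeholder token, a strongly negative score, and an UNKNOWN type before any real entry is written. A hedged sketch of the resulting defaults; the enum stand-in below covers only the members this hunk uses, while the real script imports it from gguf:

from enum import IntEnum

class SentencePieceTokenTypes(IntEnum):   # stand-in for the gguf enum
    NORMAL = 1
    UNKNOWN = 2

vocab_size = 6   # hypothetical; really from hparams or tokenizer.vocab_size()

tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size

# Every slot now has a default, so later code can assign by index:
assert len(tokens) == len(scores) == len(toktypes) == vocab_size
print(tokens[0], scores[0], int(toktypes[0]))  # b'[PAD0]' -10000.0 2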
@@ -588,21 +592,23 @@ class Model:
             elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            tokens.append(text)
-            scores.append(score)
-            toktypes.append(toktype)
+            tokens[token_id] = text
+            scores[token_id] = score
+            toktypes[token_id] = toktype
 
         added_tokens_file = self.dir_model / 'added_tokens.json'
         if added_tokens_file.is_file():
             with open(added_tokens_file, "r", encoding="utf-8") as f:
                 added_tokens_json = json.load(f)
-
                 for key in added_tokens_json:
-                    key = key.encode("utf-8")
-                    if key not in tokens:
-                        tokens.append(key)
-                        scores.append(-1000.0)
-                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    token_id = added_tokens_json[key]
+                    if (token_id >= vocab_size):
+                        logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+                        continue
+
+                    tokens[token_id] = key.encode("utf-8")
+                    scores[token_id] = -1000.0
+                    toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
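
With pre-sized lists, both the main loop and the added_tokens.json pass write by token id instead of appending, and ids at or beyond vocab_size are now warned about and skipped rather than extending the list. A small self-contained sketch of the new added-token handling (the file contents here are made up):

import json

# Hypothetical added_tokens.json payload; the real file sits in the model dir.
added_tokens_json = json.loads('{"<|user|>": 5, "<|bogus|>": 99}')

vocab_size = 6
tokens = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores = [-10000.0] * vocab_size

for key in added_tokens_json:
    token_id = added_tokens_json[key]
    if token_id >= vocab_size:
        # Mirrors the new guard: report and skip instead of growing the list.
        print(f"ignore token {token_id}: id is out of range, max={vocab_size - 1}")
        continue
    tokens[token_id] = key.encode("utf-8")
    scores[token_id] = -1000.0

print(tokens[5])   # b'<|user|>'; the list length is still vocab_size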
@@ -612,8 +618,6 @@ class Model:
             scores.append(-1000.0)
             toktypes.append(SentencePieceTokenTypes.UNUSED)
 
-        assert len(tokens) == vocab_size
-
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_tokenizer_pre("default")
         self.gguf_writer.add_token_list(tokens)
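
The dropped assert appears redundant after this change: the lists start at length vocab_size and are subsequently written only by index, so their length never changes and the invariant holds by construction. Sketch:

# With pre-sized lists, the invariant the removed assert checked holds by
# construction: indexed writes never change the length.
vocab_size = 4   # hypothetical
tokens = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
tokens[2] = b"hello"                 # write by id; length unchanged
assert len(tokens) == vocab_size     # always true now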