From b8ff85efe0bc85d9f2747692ca54b3d0a1b63372 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Mon, 12 Feb 2024 16:47:00 -0500
Subject: [PATCH] convert : pad vocab size to multiple of 64, not 8

---
 convert-hf-to-gguf.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 257761949..e50d2e80a 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1763,9 +1763,7 @@ class NomicBertModel(BertModel):
         for name, data in super().get_tensors():
             # Nomic Embed's token embeddings tensor is padded, but llama.cpp wants tensor sizes to match exactly.
             if name == 'embeddings.word_embeddings.weight' and data.shape[1] != self.vocab_size:
-                rounded_vocab_size = (self.vocab_size + 7) // 8 * 8
-                print(data.shape)
-                print(rounded_vocab_size, self.hparams["n_embd"])
+                rounded_vocab_size = (self.vocab_size + 63) // 64 * 64
                 assert data.shape == (rounded_vocab_size, self.hparams["n_embd"])
                 data = data[:self.vocab_size, :]
             yield name, data
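
Note on the change: the core of the patch is the integer round-up, switching the expected
padding of the embedding table from an 8-row to a 64-row alignment. A minimal sketch of that
arithmetic follows; the round_up helper and the sample vocab size are illustrative only and
do not appear in the patch:

    def round_up(n: int, multiple: int) -> int:
        """Round n up to the nearest multiple of `multiple` using integer math."""
        return (n + multiple - 1) // multiple * multiple

    # Hypothetical vocab size, chosen so the two alignments give different results.
    vocab_size = 30000
    print(round_up(vocab_size, 8))   # 30000 (already a multiple of 8)
    print(round_up(vocab_size, 64))  # 30016 (the padded row count the assert now expects)

The `(n + multiple - 1) // multiple * multiple` form avoids floating point and matches the
`(self.vocab_size + 63) // 64 * 64` expression used in the patch.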