From 2ae0e985b3ec9e4e12f9ae793107a9cca74af957 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Tue, 15 Aug 2023 19:55:13 +0200
Subject: [PATCH] convert-llama-7b-pth-to-gguf.py : add tensor data layout

---
 convert-llama-7b-pth-to-gguf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
index 96d2e0800..53ab5a3ed 100644
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -96,6 +96,7 @@ gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_name(last_dir)
 gguf_writer.add_file_type("All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
+gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
 gguf_writer.add_block_count(llm_arch, block_count)
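
Background: the added call records the per-architecture GGUF metadata key "<arch>.tensor_data_layout" (here "llama.tensor_data_layout"), so that readers of the converted file know the tensors follow Meta's original .pth layout rather than a transformed one (e.g. Hugging Face checkpoints permute the attention weights). A minimal sketch of what such a writer method might look like, assuming a generic add_string helper on the writer; this is an illustration, not the actual gguf.py implementation:

    # Hypothetical sketch; the real gguf.py method may differ.
    def add_tensor_data_layout(self, arch: str, layout: str) -> None:
        # Writes the per-architecture key, e.g. "llama.tensor_data_layout".
        self.add_string(f"{arch}.tensor_data_layout", layout)

Usage then mirrors the patch: gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth").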