From fc0c8d286a533363a9a663510b62af85ffad58b3 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius
Date: Sun, 18 Feb 2024 17:19:23 +0100
Subject: [PATCH] llava : update surgery script to not remove tensors (#5536)

This commit updates the surgery script to not remove the tensors from
the model file. For this to work, the `--skip-unknown` flag is added as
an argument to the convert.py script in README.md.

The motivation for this change is that the surgery script currently
removes the projector tensors from the model file. If the model was
checked out from a repository, the model file will have been updated
and will have to be checked out again to reset this effect. If this can
be avoided I think it would be preferable.

I did not perform this change for BakLLaVA models as I am not sure how
that part works.
---
 examples/llava/README.md        | 2 +-
 examples/llava/llava-surgery.py | 6 +-----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/examples/llava/README.md b/examples/llava/README.md
index 57eb42932..e42db6e5a 100644
--- a/examples/llava/README.md
+++ b/examples/llava/README.md
@@ -53,7 +53,7 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-pa
 5. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
 
 ```sh
-python ./convert.py ../llava-v1.5-7b
+python ./convert.py ../llava-v1.5-7b --skip-unknown
 ```
 
 Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` directory.
diff --git a/examples/llava/llava-surgery.py b/examples/llava/llava-surgery.py
index 0a61efdfe..8b7a62fba 100644
--- a/examples/llava/llava-surgery.py
+++ b/examples/llava/llava-surgery.py
@@ -19,10 +19,6 @@ mm_tensors = [k for k, v in checkpoint.items() if k.startswith("model.mm_project
 projector = {name: checkpoint[name].float() for name in mm_tensors}
 torch.save(projector, f"{args.model}/llava.projector")
 
-# remove these tensors from the checkpoint and save it again
-for name in mm_tensors:
-    del checkpoint[name]
-
 # BakLLaVA models contain CLIP tensors in it
 clip_tensors = [k for k, v in checkpoint.items() if k.startswith("model.vision_tower")]
 if len(clip_tensors) > 0:
@@ -39,7 +35,7 @@ if len(clip_tensors) > 0:
             f.write("{}\n")
 
 
-torch.save(checkpoint, path)
+    torch.save(checkpoint, path)
 
 print("Done!")
 print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
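
The practical effect of this patch is that the saved checkpoint keeps its `model.mm_projector.*` tensors alongside the LLaMA weights, which is why the README invocation of convert.py now needs `--skip-unknown`. Below is a minimal sketch (not part of the patch) that lists those leftover projector tensors in a local checkout; the directory path and the `pytorch_model*.bin` shard naming are assumptions about the checkpoint layout, not taken from the patch itself.

```python
# Sketch: list the tensors in a local LLaVA checkout that a plain LLaMA
# converter would not recognize after surgery. Paths and shard names are
# assumed; adjust them to the actual checkpoint layout.
import glob

import torch

model_dir = "../llava-v1.5-7b"  # assumed local model directory, as in the README

for shard in sorted(glob.glob(f"{model_dir}/pytorch_model*.bin")):
    checkpoint = torch.load(shard, map_location="cpu")
    projector_keys = [k for k in checkpoint if k.startswith("model.mm_projector")]
    for name in projector_keys:
        # These tensors remain in the shard after the updated surgery script runs.
        print(f"{shard}: {name} {tuple(checkpoint[name].shape)}")
```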