From 210eb9ed0ac3979a93e37568d96447ec3e95ad05 Mon Sep 17 00:00:00 2001
From: standby24x7
Date: Sun, 7 Jul 2024 19:37:47 +0900
Subject: [PATCH] finetune: Rename an old command name in finetune.sh (#8344)

This patch replaces the old command name "main" with "llama-cli" in
finetune.sh. The part I fixed is a comment, so it does not change the
script's behavior.

Signed-off-by: Masanari Iida
---
 examples/finetune/finetune.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/finetune/finetune.sh b/examples/finetune/finetune.sh
index d7f2165e5..e3cc7f271 100644
--- a/examples/finetune/finetune.sh
+++ b/examples/finetune/finetune.sh
@@ -8,7 +8,7 @@ if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi
 if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi

 # MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2-q8_0.gguf" # This is the model the readme uses.
-MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "main --lora" with GPU inferencing.
+MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "llama-cli --lora" with GPU inferencing.

 while getopts "dg" opt; do
   case $opt in
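
Not part of the patch itself, but for context on the comment it touches: a minimal sketch of the invocation that comment implies, assuming the f16 base model lives under ./models and a LoRA adapter produced by the finetune example (the adapter filename here is illustrative, not one the patch names):

  # Hedged example: apply a LoRA adapter with llama-cli on CPU.
  # The comment notes this adapter format is not yet supported with GPU
  # inferencing, so -ngl 0 keeps all layers on the CPU.
  ./llama-cli -m ./models/openllama-3b-v2.gguf \
      --lora ./lora-openllama-3b-v2-LATEST.bin \
      -ngl 0 -p "Hello"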