mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-10-31 07:00:16 +01:00
1c641e6aac
* `main`/`server`: rename to `llama` / `llama-server` for consistency w/ homebrew
* server: update refs -> llama-server
gitignore llama-server
* server: simplify nix package
* main: update refs -> llama
fix examples/main ref
* main/server: fix targets
* update more names
* Update build.yml
* rm accidentally checked in bins
* update straggling refs
* Update .gitignore
* Update server-llm.sh
* main: target name -> llama-cli
* Prefix all example bins w/ llama-
* fix main refs
* rename {main->llama}-cmake-pkg binary
* prefix more cmake targets w/ llama-
* add/fix gbnf-validator subfolder to cmake
* sort cmake example subdirs
* rm bin files
* fix llama-lookup-* Makefile rules
* gitignore /llama-*
* rename Dockerfiles
* rename llama|main -> llama-cli; consistent RPM bin prefixes
* fix some missing -cli suffixes
* rename dockerfile w/ llama-cli
* rename(make): llama-baby-llama
* update dockerfile refs
* more llama-cli(.exe)
* fix test-eval-callback
* rename: llama-cli-cmake-pkg(.exe)
* address gbnf-validator unused fread warning (switched to C++ / ifstream)
* add two missing llama- prefixes
* Updating docs for eval-callback binary to use new `llama-` prefix.
* Updating a few lingering doc references for rename of main to llama-cli
* Updating `run-with-preset.py` to use new binary names.
* Updating docs around `perplexity` binary rename.
* Updating documentation references for lookup-merge and export-lora
* Updating two small `main` references missed earlier in the finetune docs.
* Update apps.nix
* update grammar/README.md w/ new llama-* names
* update llama-rpc-server bin name + doc
* Revert "update llama-rpc-server bin name + doc"
This reverts commit e474ef1df4.
* add hot topic notice to README.md
* Update README.md
* Update README.md
* rename gguf-split & quantize bins refs in **/tests.sh
---------
Co-authored-by: HanClinto <hanclinto@gmail.com>
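
In practice the rename looks like this (a sketch; model path and flags are illustrative):

    ./main -m model.gguf -p "Hello"   ->  ./llama-cli -m model.gguf -p "Hello"
    ./server -m model.gguf            ->  ./llama-server -m model.gguf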
90 lines, 2.1 KiB, Bash, Executable File
#!/bin/bash

set -eu

if [ $# -lt 1 ]
then
    echo "usage: $0 path_to_build_binary [path_to_temp_folder]"
    echo "example: $0 ../../build/bin ../../tmp"
    exit 1
fi

if [ $# -gt 1 ]
then
    TMP_DIR=$2
else
    TMP_DIR=/tmp
fi

set -x

SPLIT=$1/llama-gguf-split
MAIN=$1/llama-cli
WORK_PATH=$TMP_DIR/gguf-split
ROOT_DIR=$(realpath "$(dirname "$0")"/../../)

mkdir -p "$WORK_PATH"
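
# SPLIT and MAIN point at the binaries renamed in this commit
# (formerly gguf-split and main; now llama-gguf-split and llama-cli).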

# Clean up in case of previously failed test
rm -f "$WORK_PATH"/ggml-model-split*.gguf "$WORK_PATH"/ggml-model-merge*.gguf

# 1. Get a model
(
    cd "$WORK_PATH"
    "$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
)
echo PASS
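
# Note: split output is named <prefix>-00001-of-0000N.gguf; the shard counts
# asserted below are specific to this Q8_0 Gemma 2B model.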

# 2. Split with max tensors strategy
"$SPLIT" --split-max-tensors 28 "$WORK_PATH"/gemma-1.1-2b-it.Q8_0.gguf "$WORK_PATH"/ggml-model-split
echo PASS
echo
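
# With at most 28 tensors per shard, this model splits into 6 files, hence the
# -00001-of-00006 name checked next.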

# 2b. Test the sharded model is loading properly
"$MAIN" --model "$WORK_PATH"/ggml-model-split-00001-of-00006.gguf --n-predict 32
echo PASS
echo

# 3. Merge
"$SPLIT" --merge "$WORK_PATH"/ggml-model-split-00001-of-00006.gguf "$WORK_PATH"/ggml-model-merge.gguf
echo PASS
echo
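
# Note: --merge is given only the first shard; the remaining shards are found
# next to it via the naming scheme, and a single merged file is written.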

# 3b. Test the merged model is loading properly
"$MAIN" --model "$WORK_PATH"/ggml-model-merge.gguf --n-predict 32
echo PASS
echo

# 4. Split with no tensors in the first split
"$SPLIT" --split-max-tensors 32 --no-tensor-first-split "$WORK_PATH"/ggml-model-merge.gguf "$WORK_PATH"/ggml-model-split-32-tensors
echo PASS
echo
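
# --no-tensor-first-split keeps tensors out of the first shard (it holds only
# metadata), so at 32 tensors per shard this model ends up in 7 files.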

# 4b. Test the sharded model is loading properly
"$MAIN" --model "$WORK_PATH"/ggml-model-split-32-tensors-00001-of-00007.gguf --n-predict 32
echo PASS
echo

# 5. Merge
#"$SPLIT" --merge "$WORK_PATH"/ggml-model-split-32-tensors-00001-of-00007.gguf "$WORK_PATH"/ggml-model-merge-2.gguf
#echo PASS
#echo

# 5b. Test the merged model is loading properly
#"$MAIN" --model "$WORK_PATH"/ggml-model-merge-2.gguf --n-predict 32
#echo PASS
#echo

# 6. Split with size strategy
"$SPLIT" --split-max-size 2G "$WORK_PATH"/ggml-model-merge.gguf "$WORK_PATH"/ggml-model-split-2G
echo PASS
echo
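
# The 2G size cap splits this model (its Q8_0 file is a bit over 2 GB) into
# two shards, hence the -00001-of-00002 name checked next.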

# 6b. Test the sharded model is loading properly
"$MAIN" --model "$WORK_PATH"/ggml-model-split-2G-00001-of-00002.gguf --n-predict 32
echo PASS
echo

# Clean up
rm -f "$WORK_PATH"/ggml-model-split*.gguf "$WORK_PATH"/ggml-model-merge*.gguf
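
# Typical invocation from this script's directory (per the usage text above):
#   ./tests.sh ../../build/bin ../../tmp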