Commit 1c641e6aac
* `main`/`server`: rename to `llama` / `llama-server` for consistency w/ homebrew
* server: update refs -> llama-server
gitignore llama-server
* server: simplify nix package
* main: update refs -> llama
fix examples/main ref
* main/server: fix targets
* update more names
* Update build.yml
* rm accidentally checked in bins
* update straggling refs
* Update .gitignore
* Update server-llm.sh
* main: target name -> llama-cli (see the CMake sketch after this commit message)
* Prefix all example bins w/ llama-
* fix main refs
* rename {main->llama}-cmake-pkg binary
* prefix more cmake targets w/ llama-
* add/fix gbnf-validator subfolder to cmake
* sort cmake example subdirs
* rm bin files
* fix llama-lookup-* Makefile rules
* gitignore /llama-*
* rename Dockerfiles
* rename llama|main -> llama-cli; consistent RPM bin prefixes
* fix some missing -cli suffixes
* rename dockerfile w/ llama-cli
* rename(make): llama-baby-llama
* update dockerfile refs
* more llama-cli(.exe)
* fix test-eval-callback
* rename: llama-cli-cmake-pkg(.exe)
* address gbnf-validator unused fread warning (switched to C++ / ifstream)
* add two missing llama- prefixes
* Updating docs for eval-callback binary to use new `llama-` prefix.
* Updating a few lingering doc references for rename of main to llama-cli
* Updating `run-with-preset.py` to use new binary names.
Updating docs around `perplexity` binary rename.
* Updating documentation references for lookup-merge and export-lora
* Updating two small `main` references missed earlier in the finetune docs.
* Update apps.nix
* update grammar/README.md w/ new llama-* names
* update llama-rpc-server bin name + doc
* Revert "update llama-rpc-server bin name + doc"
This reverts commit e474ef1df4.
* add hot topic notice to README.md
* Update README.md
* Update README.md
* rename gguf-split & quantize bins refs in **/tests.sh
---------
Co-authored-by: HanClinto <hanclinto@gmail.com>
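
As an illustration of the target renaming described above, here is a minimal sketch of what a per-example CMakeLists.txt (for instance the one under examples/main/) might look like after this change. It is not copied from the commit; the link and compile lines are assumptions based on how the example targets are commonly wired up:

# Sketch only: hypothetical per-example CMakeLists.txt after the rename.
set(TARGET llama-cli)                  # target/binary was previously named "main"
add_executable(${TARGET} main.cpp)
install(TARGETS ${TARGET} RUNTIME)
# Assumed library names; the real file may link additional helpers.
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)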
56 lines · 1.3 KiB · CMake
# dependencies

find_package(Threads REQUIRED)

# third-party

# ...

# examples

include_directories(${CMAKE_CURRENT_SOURCE_DIR})

if (EMSCRIPTEN)
else()
    add_subdirectory(baby-llama)
    add_subdirectory(batched-bench)
    add_subdirectory(batched)
    add_subdirectory(benchmark)
    add_subdirectory(convert-llama2c-to-ggml)
    add_subdirectory(embedding)
    add_subdirectory(eval-callback)
    add_subdirectory(export-lora)
    add_subdirectory(finetune)
    add_subdirectory(gbnf-validator)
    add_subdirectory(gguf-split)
    add_subdirectory(gguf)
    add_subdirectory(gritlm)
    add_subdirectory(imatrix)
    add_subdirectory(infill)
    add_subdirectory(llama-bench)
    add_subdirectory(llava)
    add_subdirectory(lookahead)
    add_subdirectory(lookup)
    add_subdirectory(main)
    add_subdirectory(parallel)
    add_subdirectory(passkey)
    add_subdirectory(perplexity)
    add_subdirectory(quantize-stats)
    add_subdirectory(quantize)
    add_subdirectory(retrieval)
    if (LLAMA_RPC)
        add_subdirectory(rpc)
    endif()
    if (LLAMA_BUILD_SERVER)
        add_subdirectory(server)
    endif()
    if (LLAMA_SYCL)
        add_subdirectory(sycl)
    endif()
    add_subdirectory(save-load-state)
    add_subdirectory(simple)
    add_subdirectory(speculative)
    add_subdirectory(tokenize)
    add_subdirectory(train-text-from-scratch)
endif()
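
For context on the guards near the end of the file (LLAMA_RPC, LLAMA_BUILD_SERVER, LLAMA_SYCL): these test options declared in the top-level CMakeLists.txt. A hedged sketch of such declarations follows; only the option names come from the guards above, while the description strings and default values are assumptions:

# Sketch only: how the options used by the guards above are typically declared
# at the top level (descriptions and defaults here are assumptions).
option(LLAMA_BUILD_SERVER "llama: build server example" ON)
option(LLAMA_RPC          "llama: use RPC"              OFF)
option(LLAMA_SYCL         "llama: use SYCL"             OFF)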