From 65b0d8ba4a1ddbd637fa90847f0b7d7334703a70 Mon Sep 17 00:00:00 2001
From: Mason M
Date: Fri, 24 Jan 2025 12:46:57 -0400
Subject: [PATCH] Replace main-cmake-pkg with simple-cmake-pkg

---
 examples/main-cmake-pkg/CMakeLists.txt   | 32 ----------------
 examples/main-cmake-pkg/README.md        | 31 ---------------
 .../.gitignore                           |  0
 examples/simple-cmake-pkg/CMakeLists.txt | 11 ++++++
 examples/simple-cmake-pkg/README.md      | 38 +++++++++++++++++++
 ggml/CMakeLists.txt                      |  8 ++--
 ggml/cmake/ggml-config.cmake.in          |  9 +++--
 7 files changed, 59 insertions(+), 70 deletions(-)
 delete mode 100644 examples/main-cmake-pkg/CMakeLists.txt
 delete mode 100644 examples/main-cmake-pkg/README.md
 rename examples/{main-cmake-pkg => simple-cmake-pkg}/.gitignore (100%)
 create mode 100644 examples/simple-cmake-pkg/CMakeLists.txt
 create mode 100644 examples/simple-cmake-pkg/README.md

diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt
deleted file mode 100644
index 5563f4de0..000000000
--- a/examples/main-cmake-pkg/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-cmake_minimum_required(VERSION 3.12)
-project("llama-cli-cmake-pkg" C CXX)
-set(TARGET llama-cli-cmake-pkg)
-
-find_package(Llama 0.0.1 REQUIRED)
-
-# Bake common functionality in with target. Because applications
-# using the relocatable Llama package should be outside of the
-# source tree, llama-cli-cmake-pkg pretends the dependencies are built-in.
-set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common")
-add_library(common OBJECT)
-file(GLOB _common_files
-    "${_common_path}/*.h"
-    "${_common_path}/*.cpp"
-)
-target_sources(common PRIVATE ${_common_files})
-
-# If the common project was part of "llama-cli-cmake-pkg" the transient
-# defines would automatically be attached. Because the common func-
-# tionality is separate, but dependent upon the defines, it must be
-# explicitly extracted from the "llama" target.
-#
-get_target_property(_llama_transient_defines llama
-    INTERFACE_COMPILE_DEFINITIONS)
-
-target_compile_definitions(common PRIVATE "${_llama_transient_defines}")
-
-add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../main/main.cpp)
-target_include_directories(${TARGET} PRIVATE ${_common_path})
-install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/main-cmake-pkg/README.md b/examples/main-cmake-pkg/README.md
deleted file mode 100644
index 08d83dd08..000000000
--- a/examples/main-cmake-pkg/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# llama.cpp/example/main-cmake-pkg
-
-This program builds [llama-cli](../main) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
-
-## Building
-
-Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions.
-
-### Considerations
-
-When hardware acceleration libraries are used (e.g. CUDA, Metal, etc.), CMake must be able to locate the associated CMake package.
-
-### Build llama.cpp and install to C:\LlamaCPP directory
-
-```cmd
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
-cmake -B build -DBUILD_SHARED_LIBS=OFF -G "Visual Studio 17 2022" -A x64
-cmake --build build --config Release
-cmake --install build --prefix C:/LlamaCPP
-```
-
-### Build llama-cli-cmake-pkg
-
-
-```cmd
-cd ..\examples\main-cmake-pkg
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
-cmake --build build --config Release
-cmake --install build --prefix C:/MyLlamaApp
-```
diff --git a/examples/main-cmake-pkg/.gitignore b/examples/simple-cmake-pkg/.gitignore
similarity index 100%
rename from examples/main-cmake-pkg/.gitignore
rename to examples/simple-cmake-pkg/.gitignore
diff --git a/examples/simple-cmake-pkg/CMakeLists.txt b/examples/simple-cmake-pkg/CMakeLists.txt
new file mode 100644
index 000000000..128e38c8f
--- /dev/null
+++ b/examples/simple-cmake-pkg/CMakeLists.txt
@@ -0,0 +1,11 @@
+cmake_minimum_required(VERSION 3.12)
+project(llama-simple-cmake-pkg)
+
+set(TARGET llama-simple-cmake-pkg)
+
+find_package(Llama REQUIRED)
+
+add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../simple/simple.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE llama ggml::all ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/simple-cmake-pkg/README.md b/examples/simple-cmake-pkg/README.md
new file mode 100644
index 000000000..26c93d9e3
--- /dev/null
+++ b/examples/simple-cmake-pkg/README.md
@@ -0,0 +1,38 @@
+# llama.cpp/example/simple-cmake-pkg
+
+This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
+
+## Building
+
+Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions.
+
+### Considerations
+
+When hardware acceleration libraries are used (e.g. CUDA, Metal, Vulkan, etc.), the appropriate dependencies will be searched for automatically. So, for example, when finding a package that was built with CUDA support, the CUDA Toolkit must still be locatable by CMake on the consuming system.
+
+### Build llama.cpp and install to llama.cpp/inst
+
+```sh
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+mkdir build
+mkdir inst
+cmake -S . -B build
+cmake --build build
+cmake --install build --prefix inst
+```
+
+### Build simple-cmake-pkg
+
+```sh
+cd examples/simple-cmake-pkg
+mkdir build
+cmake -S . -B build -DCMAKE_PREFIX_PATH=../../inst/lib/cmake
+cmake --build build
+```
+
+### Run simple-cmake-pkg
+
+```sh
+./build/llama-simple-cmake-pkg -m ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is"
+```
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index 17abdfede..1b5478918 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -269,7 +269,7 @@ endif()
 # Create CMake package
 #
 
-# * Generate version info based on git commit.
+# Generate version info based on git commit.
 
 find_program(GIT_EXE NAMES git git.exe REQUIRED NO_CMAKE_FIND_ROOT_PATH)
 execute_process(COMMAND ${GIT_EXE} rev-list --count HEAD
@@ -288,7 +288,7 @@ execute_process(COMMAND ${GIT_EXE} rev-parse --short HEAD
     OUTPUT_STRIP_TRAILING_WHITESPACE
 )
 
-# * Capture variables prefixed with GGML_.
+# Capture variables prefixed with GGML_.
 set(variable_set_statements
 "
@@ -297,6 +297,8 @@ set(variable_set_statements
 
 ")
 
+set(GGML_SHARED_LIB ${BUILD_SHARED_LIBS})
+
 get_cmake_property(all_variables VARIABLES)
 foreach(variable_name IN LISTS all_variables)
     if(variable_name MATCHES "^GGML_")
@@ -310,7 +312,7 @@ endforeach()
 
 set(GGML_VARIABLES_EXPANDED ${variable_set_statements})
 
-# * Create the CMake package and set install location.
+# Create the CMake package and set install location.
 
 set(GGML_INSTALL_VERSION 0.0.${GGML_BUILD_NUMBER})
 set(GGML_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
diff --git a/ggml/cmake/ggml-config.cmake.in b/ggml/cmake/ggml-config.cmake.in
index c0e95aa2f..18b9ca828 100644
--- a/ggml/cmake/ggml-config.cmake.in
+++ b/ggml/cmake/ggml-config.cmake.in
@@ -17,7 +17,7 @@ find_library(GGML_LIBRARY ggml
 add_library(ggml::ggml UNKNOWN IMPORTED)
 set_target_properties(ggml::ggml
     PROPERTIES
-        INTERFACE_LINK_LIBRARIES "${GGML_LIBRARY}")
+        IMPORTED_LOCATION "${GGML_LIBRARY}")
 
 find_library(GGML_BASE_LIBRARY ggml-base
     REQUIRED
@@ -27,7 +27,7 @@ find_library(GGML_BASE_LIBRARY ggml-base
 add_library(ggml::ggml-base UNKNOWN IMPORTED)
 set_target_properties(ggml::ggml-base
     PROPERTIES
-        INTERFACE_LINK_LIBRARIES "${GGML_BASE_LIBRARY}")
+        IMPORTED_LOCATION "${GGML_BASE_LIBRARY}")
 
 if (NOT GGML_SHARED_LIB)
     if (APPLE AND GGML_ACCELERATE)
@@ -91,7 +91,8 @@ endif()
 
 set(_ggml_all_targets "")
 foreach(_ggml_backend ${GGML_AVAILABLE_BACKENDS})
-    string(replace "-" "_" _ggml_backend_pfx "${_ggml_backend}")
+    string(REPLACE "-" "_" _ggml_backend_pfx "${_ggml_backend}")
+    string(TOUPPER "${_ggml_backend_pfx}" _ggml_backend_pfx)
 
     find_library(${_ggml_backend_pfx}_LIBRARY ${_ggml_backend}
         REQUIRED
@@ -105,7 +106,7 @@ foreach(_ggml_backend ${GGML_AVAILABLE_BACKENDS})
         PROPERTIES
             INTERFACE_INCLUDE_DIRECTORIES "${GGML_INCLUDE_DIR}"
             IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
-            IMPORTED_LOCATION "${_ggml_backend_pfx}_LIBRARY"
+            IMPORTED_LOCATION "${${_ggml_backend_pfx}_LIBRARY}"
             INTERFACE_COMPILE_FEATURES cxx_std_17
             POSITION_INDEPENDENT_CODE ON)
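
Note: the `ggml-config.cmake.in` hunks above are easiest to follow with the variable indirection spelled out. The sketch below is not part of the patch; it is a standalone script (runnable with `cmake -P`) that uses a made-up backend name and library path purely to show how the backend name becomes the `GGML_*` variable prefix and why `IMPORTED_LOCATION` needs the nested `${${_ggml_backend_pfx}_LIBRARY}` expansion rather than the single expansion the old code used.

```cmake
# Illustration only -- hypothetical backend name and path, not taken from the patch.
set(_ggml_backend "ggml-cpu")

# Derive the variable prefix the config file uses for find_library().
string(REPLACE "-" "_" _ggml_backend_pfx "${_ggml_backend}")  # -> ggml_cpu
string(TOUPPER "${_ggml_backend_pfx}" _ggml_backend_pfx)      # -> GGML_CPU

# Stand-in for what find_library(${_ggml_backend_pfx}_LIBRARY ...) would store.
set(${_ggml_backend_pfx}_LIBRARY "/usr/local/lib/libggml-cpu.so")

# Single expansion yields the variable *name*, which is what the old
# IMPORTED_LOCATION line ended up receiving:
message(STATUS "${_ggml_backend_pfx}_LIBRARY")      # prints GGML_CPU_LIBRARY
# Nested expansion yields the stored library *path*, which is what the
# imported backend target actually needs in IMPORTED_LOCATION:
message(STATUS "${${_ggml_backend_pfx}_LIBRARY}")   # prints /usr/local/lib/libggml-cpu.so
```

The same idea applies to the `ggml::ggml` and `ggml::ggml-base` hunks: `IMPORTED_LOCATION` is the property that points CMake at the library file backing an imported target, whereas `INTERFACE_LINK_LIBRARIES` only adds link items for consumers and does not give the imported target a location of its own.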