Mirror of https://github.com/ggerganov/llama.cpp.git
938943cdbf
* llama : move sampling code into llama-sampling ggml-ci
* llama : move grammar code into llama-grammar ggml-ci
* cont ggml-ci
* cont : pre-fetch rules
* cont ggml-ci
* llama : deprecate llama_sample_grammar
* llama : move tokenizers into llama-vocab ggml-ci
* make : update llama.cpp deps [no ci]
* llama : redirect external API to internal APIs ggml-ci
* llama : suffix the internal APIs with "_impl" ggml-ci
* llama : clean-up
34 lines
749 B
CMake
# TODO: should not use this
if (WIN32)
    if (BUILD_SHARED_LIBS)
        set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
    endif()
endif()

#
# libraries
#

# llama

add_library(llama
            ../include/llama.h
            llama.cpp
            llama-vocab.cpp
            llama-grammar.cpp
            llama-sampling.cpp
            unicode.h
            unicode.cpp
            unicode-data.cpp
            )

target_include_directories(llama PUBLIC . ../include)
target_compile_features   (llama PUBLIC cxx_std_11) # don't bump

target_link_libraries(llama PUBLIC ggml)

if (BUILD_SHARED_LIBS)
    set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
endif()
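
For context, a minimal sketch of how a downstream project could consume the llama target defined above, assuming llama.cpp is vendored as a subdirectory. The my_app target, main.cpp, and the third_party/llama.cpp path are hypothetical examples, not part of this repository:

# --- Hypothetical consumer CMakeLists.txt (sketch only, not part of this repo) ---
cmake_minimum_required(VERSION 3.14)
project(my_app CXX)

# Assumes llama.cpp has been cloned into third_party/llama.cpp; its top-level
# CMakeLists.txt pulls in this src/ directory and defines the `llama` target.
add_subdirectory(third_party/llama.cpp)

add_executable(my_app main.cpp)

# Linking `llama` is enough: the PUBLIC link to ggml and the PUBLIC include
# directories (. and ../include, where llama.h lives) propagate to my_app.
target_link_libraries(my_app PRIVATE llama)

Configuring with -DBUILD_SHARED_LIBS=ON would exercise the shared-library branch above (position-independent code, the LLAMA_SHARED and LLAMA_BUILD defines, and symbol export on Windows); when BUILD_SHARED_LIBS is not set, CMake builds llama as a static library.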