Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25)
Commit dd7eff57d8 — Sample interface, new samplers:
* New samplers: locally typical sampling, tail free sampling, frequency and presence penalty, mirostat. Ignore EOS fix: -inf should be used.
* mirostat
* Added --logit-bias and --no-penalize-nl, removed std::span
* Use C++11, clarify llama API documentation, rename Mirostat parameters to --mirostat_lr and --mirostat_ent, add temperature sampling for Mirostat, simplify Mirostat sampling API parameters (removed N and *k)
* Save and load example adjust
* Tests
* Windows build fix
* Windows test fix
# Helper: build a single-source test executable, link it against the llama
# library, and register it with CTest; any extra arguments are forwarded to
# the test binary's command line via ${ARGN}.
function(llama_add_test source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()

# llama_add_test(test-double-float.c) # SLOW
llama_add_test(test-quantize-fns.cpp)
llama_add_test(test-quantize-perf.cpp)
llama_add_test(test-sampling.cpp)
llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
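For reference, a minimal sketch of how this helper is meant to be consumed: add_test() only takes effect once CTest has been enabled higher up in the build, and the parent-directory wiring and the test/model file names below are assumptions for illustration, not part of this file.

# Assumed top-level wiring (sketch): enable_testing() must run before this
# directory is added, otherwise add_test() registers nothing with CTest.
#   enable_testing()
#   add_subdirectory(tests)
#
# Hypothetical extra test whose data file is forwarded to argv[1] via ${ARGN}:
# llama_add_test(test-example.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/example.bin)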