mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 06:10:29 +01:00
f5a77a629b
* Major refactoring — introduce C-style API
* Clean up
* Add <cassert>, <iterator>, <algorithm>, ...
* Fix timing reporting and accumulation
* Measure eval time only for single-token calls
* Change llama_tokenize return meaning
5 lines
268 B
CMake
5 lines
268 B
CMake
# Build and register the tokenizer regression test (test-tokenizer-0).
# NOTE(review): stray "|" artifact lines from the page extraction have been
# removed — a bare "|" is not a valid CMake command and breaks configuration.
set(TEST_TARGET test-tokenizer-0)

# The test executable is built from a single source file of the same name.
add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)

# Link the core llama library, the ggml backend, and the shared test utils.
# PRIVATE: these are implementation dependencies of the test binary only.
target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)

# Register with CTest. $<TARGET_FILE:...> resolves to the built binary's path
# at generate time; the test takes the vocab file path as its only argument.
# NOTE(review): the argument path reaches outside this directory (../models) —
# assumes models/ggml-vocab.bin sits next to this tests directory in the repo.
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)