# tests/CMakeLists.txt — CMake test targets for llama.cpp
# Mirror of https://github.com/ggerganov/llama.cpp.git (commit f4ab2a4147, synced 2024-12-25).
# Upstream change summary: merged DeepSeek tokenizer work into main; moved regex
# patterns to unicode.cpp/unicode.h; added unicode_regex_split and native
# \p{N}/\p{L}/\p{P} support; introduced the "tokenizer.ggml.pre" GGUF KV for
# pre-tokenizer selection; added convert-hf-to-gguf-update.py; refactored the
# CMake test targets and vocab tests; added vocabs for llama-3, phi-3, mpt,
# gpt-2, starcoder; disabled test-tokenizer-1-bpe due to slowness.
# Co-authored-by: Jaggzh <jaggz.h@gmail.com>
# Co-authored-by: Kazim Abrar Mahi <kazimabrarmahi135@gmail.com>
# Registers a CTest entry that runs an already-built executable target.
#
# Usage:
#   llama_test(<target>
#     [NAME <test-name>]          # CTest name; defaults to <target>
#     [LABEL <label>]             # CTest label; defaults to "main"
#     [WORKING_DIRECTORY <dir>]   # defaults to "."
#     [ARGS <arg>...])            # arguments passed to the executable
function(llama_test target)
    include(CMakeParseArguments)
    set(options)
    set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
    set(multiValueArgs ARGS)
    # PARSE_ARGV handles empty and semicolon-containing arguments correctly,
    # unlike the legacy ${ARGN} form; index 1 skips the <target> positional.
    cmake_parse_arguments(PARSE_ARGV 1 LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}")

    if (NOT DEFINED LLAMA_TEST_LABEL)
        set(LLAMA_TEST_LABEL "main")
    endif()
    if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
        set(LLAMA_TEST_WORKING_DIRECTORY .)
    endif()
    if (DEFINED LLAMA_TEST_NAME)
        set(TEST_NAME ${LLAMA_TEST_NAME})
    else()
        set(TEST_NAME ${target})
    endif()

    set(TEST_TARGET ${target})

    # $<TARGET_FILE:...> resolves to the built artifact path at generate time
    add_test(
        NAME ${TEST_NAME}
        WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
        COMMAND $<TARGET_FILE:${TEST_TARGET}>
        ${LLAMA_TEST_ARGS})

    set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
endfunction()
# Builds and runs a test source file.
# Optional args:
# - NAME: name of the executable & test target (defaults to the source file name without extension)
# - LABEL: label for the test (defaults to main)
# - ARGS: arguments to pass to the test executable
# - WORKING_DIRECTORY
function(llama_target_and_test source)
    include(CMakeParseArguments)
    set(options)
    set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
    set(multiValueArgs ARGS)
    # PARSE_ARGV is robust against empty/list-valued arguments, unlike the
    # legacy ${ARGN} form; index 1 skips the <source> positional argument.
    cmake_parse_arguments(PARSE_ARGV 1 LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}")

    if (NOT DEFINED LLAMA_TEST_LABEL)
        set(LLAMA_TEST_LABEL "main")
    endif()
    if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
        set(LLAMA_TEST_WORKING_DIRECTORY .)
    endif()
    if (DEFINED LLAMA_TEST_NAME)
        set(TEST_TARGET ${LLAMA_TEST_NAME})
    else()
        # derive the target name from the source file, e.g. test-rope.cpp -> test-rope
        get_filename_component(TEST_TARGET ${source} NAME_WE)
    endif()

    # get-model.cpp is compiled into every test (model-path discovery helper)
    add_executable(${TEST_TARGET} ${source} get-model.cpp)
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE common)

    add_test(
        NAME ${TEST_TARGET}
        WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
        COMMAND $<TARGET_FILE:${TEST_TARGET}>
        ${LLAMA_TEST_ARGS})

    set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${LLAMA_TEST_LABEL})
endfunction()
# test-tokenizer-0: compile the executable once, then register one CTest
# entry per vocab fixture.
add_executable(test-tokenizer-0 test-tokenizer-0.cpp)
install(TARGETS test-tokenizer-0 RUNTIME)
target_link_libraries(test-tokenizer-0 PRIVATE common)

# vocab fixtures live next to the sources, under ../models
set(MODELS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../models)

llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm      ARGS ${MODELS_DIR}/ggml-vocab-llama-spm.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe      ARGS ${MODELS_DIR}/ggml-vocab-llama-bpe.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3          ARGS ${MODELS_DIR}/ggml-vocab-phi-3.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon         ARGS ${MODELS_DIR}/ggml-vocab-falcon.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm   ARGS ${MODELS_DIR}/ggml-vocab-deepseek-llm.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${MODELS_DIR}/ggml-vocab-deepseek-coder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge       ARGS ${MODELS_DIR}/ggml-vocab-bert-bge.gguf)
# TODO: enable when fixed
#llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt           ARGS ${MODELS_DIR}/ggml-vocab-mpt.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder      ARGS ${MODELS_DIR}/ggml-vocab-starcoder.gguf)
llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2          ARGS ${MODELS_DIR}/ggml-vocab-gpt-2.gguf)
# build test-tokenizer-1-bpe target once and add many tests
add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)
target_link_libraries(test-tokenizer-1-bpe PRIVATE common)
install(TARGETS test-tokenizer-1-bpe RUNTIME)

# TODO: disabled due to slowness
# NOTE(review): the executable is still built/installed above so it can be run
# manually against any of these vocab files while the CTest entries stay off.
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-stablelm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-bloom ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf)
# test-tokenizer-1-spm: single executable shared by the SPM vocab tests.
add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp)
install(TARGETS test-tokenizer-1-spm RUNTIME)
target_link_libraries(test-tokenizer-1-spm PRIVATE common)

llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
# One-source-file tests: llama_target_and_test builds each file (plus
# get-model.cpp) into its own executable and registers a matching CTest entry.

# quantization / sampling / templates
# llama_target_and_test(test-double-float.cpp) # SLOW
llama_target_and_test(test-quantize-fns.cpp)
llama_target_and_test(test-quantize-perf.cpp)
llama_target_and_test(test-sampling.cpp)
llama_target_and_test(test-chat-template.cpp)

# grammar / gradient / backend ops
llama_target_and_test(test-grammar-parser.cpp)
llama_target_and_test(test-llama-grammar.cpp)
llama_target_and_test(test-grammar-integration.cpp)
llama_target_and_test(test-grad0.cpp)
# llama_target_and_test(test-opt.cpp) # SLOW
llama_target_and_test(test-backend-ops.cpp)

llama_target_and_test(test-rope.cpp)

# grouped under the "model" CTest label so they can be filtered (ctest -L model);
# presumably these need an actual model file at run time — verify against get-model.cpp
llama_target_and_test(test-model-load-cancel.cpp LABEL "model")
llama_target_and_test(test-autorelease.cpp LABEL "model")

# runs from the repository root so the test can reach fixture files by relative path
llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
# extra include path for headers shipped with the server example
target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server)
# dummy executable - not installed
# Links only against the core llama library (not common) to verify the public
# C API compiles from plain C.
# NAME_WE strips the extension, so TEST_TARGET becomes "test-c".
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
target_link_libraries(${TEST_TARGET} PRIVATE llama)