llama : Add test for model load cancellation
commit 3425e62745
parent 9abe2e44d1
tests/CMakeLists.txt

@@ -50,6 +50,7 @@ llama_build_and_test_executable(test-grad0.cpp)
 llama_build_and_test_executable(test-backend-ops.cpp)
 
 llama_build_and_test_executable(test-rope.cpp)
+llama_build_and_test_executable(test-model-load-cancel.cpp)
 
 # dummy executable - not installed
 get_filename_component(TEST_TARGET test-c.c NAME_WE)
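Note: llama_build_and_test_executable is the repository's own CMake helper, defined earlier in this CMakeLists.txt; it builds the listed source into a test binary, links it against the llama library, and registers it with CTest, so the new cancellation test runs alongside the existing ones under ctest.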
tests/test-model-load-cancel.cpp (new file, 17 lines)

@@ -0,0 +1,17 @@
+#include "llama.h"
+
+#include <cstdlib>
+#include <tuple>
+
+int main(void) {
+    llama_backend_init(false);
+    auto params = llama_model_params{};
+    params.use_mmap = false;
+    params.progress_callback = [](float progress, void * ctx){
+        std::ignore = ctx;
+        return progress > 0.50;
+    };
+    auto * model = llama_load_model_from_file("../models/7B/ggml-model-f16.gguf", params);
+    llama_backend_free();
+    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
+}
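Note on the mechanism: the progress callback returns a bool, and (assuming the semantics this commit targets, where returning false aborts the load) llama_load_model_from_file comes back as nullptr when the callback cancels. The test's callback returns progress > 0.50, which is false at the first progress report, so the load is cancelled almost immediately and the nullptr result maps to EXIT_SUCCESS.

As a minimal sketch (not part of this commit) of how an application might drive the same mechanism with an external flag, assuming llama_model_params also carries a progress_callback_user_data pointer as in llama.h of this period:

#include "llama.h"

#include <atomic>
#include <cstdlib>

static std::atomic<bool> g_cancel{false};

// Load a model, aborting early once g_cancel is set (e.g. by another thread).
static llama_model * load_cancellable(const char * path) {
    auto params = llama_model_params{};
    params.use_mmap = false;
    // Hypothetical wiring: pass the flag through the user-data pointer.
    params.progress_callback_user_data = &g_cancel;
    params.progress_callback = [](float /*progress*/, void * ctx) {
        // Returning false asks the loader to abort; the load then yields nullptr.
        return !static_cast<std::atomic<bool> *>(ctx)->load();
    };
    return llama_load_model_from_file(path, params);
}

int main(void) {
    llama_backend_init(false);
    g_cancel = true; // request cancellation up front, mirroring the test above
    llama_model * model = load_cancellable("../models/7B/ggml-model-f16.gguf");
    llama_backend_free();
    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}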