// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763
#include <cstdio>
#include <string>
#include <thread>
#include "llama.h"
#include "get-model.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
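        // initialize the llama.cpp backend from inside the worker thread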
        llama_backend_init();
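
        // load the model and create a context entirely within this thread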
        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
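
        // free the context, model, and backend in reverse order of creation before the thread exits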
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
    }).join();

    return 0;
}