#include "ggml.h"
#include "llama.h"
#include "common.h"
#include "ngram-cache.h"

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main(int argc, char ** argv){
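    // Build a static n-gram cache for lookup decoding: tokenize the prompt,
    // hash its n-grams into a llama_ngram_cache, and write the result to the
    // static lookup cache file given by params.lookup_cache_static.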
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = NULL;
    llama_context * ctx = NULL;

    // load the model
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    GGML_ASSERT(model != nullptr);

    // tokenize the prompt
    std::vector<llama_token> inp;
    inp = ::llama_tokenize(ctx, params.prompt, true, true);
    fprintf(stderr, "%s: tokenization done\n", __func__);

    // hash the n-grams of the entire tokenized prompt into the cache;
    // LLAMA_NGRAM_STATIC is used as both the minimum and maximum n-gram size
    llama_ngram_cache ngram_cache;
    llama_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
    fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());

    // save the n-gram cache to the static lookup cache file
    llama_ngram_cache_save(ngram_cache, params.lookup_cache_static);
}