mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 05:48:47 +01:00
llama : fix --mtest option (close #1414)
This commit is contained in:
parent
773ee249fb
commit
fb62f92433
@@ -121,7 +121,7 @@ int main(int argc, char ** argv) {
     // uncomment the "used_mem" line in llama.cpp to see the results
     if (params.mem_test) {
         {
-            const std::vector<llama_token> tmp(params.n_batch, 0);
+            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
             llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
         }
Loading…
Reference in New Issue
Block a user