embedding : evaluate prompt in batches (#2713)

parent 1123f7fbdf
commit 519c981f8b
examples/embedding/embedding.cpp

@@ -72,12 +72,20 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "\n");
     }
 
     if (params.embedding){
-        if (embd_inp.size() > 0) {
-            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
+        if (embd_inp.size() > (size_t)params.n_ctx) {
+            fprintf(stderr, "%s: error: prompt is longer than the context window (%zu tokens, n_ctx = %d)\n",
+                    __func__, embd_inp.size(), params.n_ctx);
+            return 1;
+        }
+
+        while (!embd_inp.empty()) {
+            int n_tokens = std::min(params.n_batch, (int) embd_inp.size());
+            if (llama_eval(ctx, embd_inp.data(), n_tokens, n_past, params.n_threads)) {
                 fprintf(stderr, "%s : failed to eval\n", __func__);
                 return 1;
             }
+            n_past += n_tokens;
+            embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_tokens);
         }
 
         const int n_embd = llama_n_embd(ctx);
@@ -87,7 +95,6 @@ int main(int argc, char ** argv) {
             printf("%f ", embeddings[i]);
         }
         printf("\n");
     }
-
 
     llama_print_timings(ctx);
     llama_free(ctx);
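
The core of the change is the chunked evaluation loop: rather than handing the entire tokenized prompt to llama_eval in a single call, the loop feeds at most params.n_batch tokens per call, advances n_past by the number of tokens consumed, and erases the consumed tokens from the front of the buffer. The standalone sketch below illustrates the same pattern outside of llama.cpp; eval_in_batches and eval_fn are hypothetical names introduced here for illustration, with eval_fn standing in for llama_eval. This is not part of the commit.

#include <algorithm>
#include <cstdio>
#include <vector>

// Minimal sketch of the batched-evaluation pattern introduced by this
// commit. eval_fn is a hypothetical stand-in for llama_eval: it receives
// a pointer to the next chunk of tokens, the chunk size, and the number
// of tokens already evaluated (n_past), and returns non-zero on failure.
template <typename EvalFn>
int eval_in_batches(std::vector<int> tokens, int n_batch, EvalFn eval_fn) {
    int n_past = 0;
    while (!tokens.empty()) {
        // Never feed more than n_batch tokens per call.
        int n_tokens = std::min(n_batch, (int) tokens.size());
        if (eval_fn(tokens.data(), n_tokens, n_past)) {
            fprintf(stderr, "eval failed\n");
            return 1;
        }
        n_past += n_tokens;                                      // advance past the evaluated chunk
        tokens.erase(tokens.begin(), tokens.begin() + n_tokens); // drop the consumed tokens
    }
    return 0;
}

int main() {
    // Dummy prompt of 10 tokens, evaluated 4 at a time -> chunks of 4, 4, 2.
    std::vector<int> prompt(10);
    return eval_in_batches(prompt, 4, [](const int * /*tok*/, int n, int past) {
        printf("evaluating %d tokens at n_past = %d\n", n, past);
        return 0; // success
    });
}

Erasing from the front of a std::vector is linear in the remaining length, but the prompt is only walked once, so the straightforward erase keeps the loop easy to follow; the same effect could also be achieved with an offset index into the buffer.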