fix embeddings when using CUDA (#3657)

slaren 2023-10-17 22:24:50 +02:00 committed by GitHub
parent e1675d133c
commit cb33f43a2a

llama.cpp

@@ -5903,6 +5903,13 @@ static int llama_decode_internal(
 
     ggml_allocr_alloc_graph(lctx.alloc, gf);
 
+    struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
+    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+    GGML_ASSERT(strcmp(res->name,        "result_output") == 0);
+    GGML_ASSERT(strcmp(embeddings->name, "result_norm")   == 0);
+
+
 #ifdef GGML_USE_CUBLAS
     for (int i = 0; i < gf->n_leafs; i++) {
         ggml_tensor * node = gf->leafs[i];
@@ -5920,6 +5927,12 @@ static int llama_decode_internal(
     }
 
     ggml_cuda_set_mul_mat_q(cparams.mul_mat_q);
+
+    // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
+    if (!lctx.embedding.empty()) {
+        embeddings->backend = GGML_BACKEND_CPU;
+    }
+    res->backend = GGML_BACKEND_CPU;
 #endif
 
     // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
@@ -5944,12 +5957,6 @@ static int llama_decode_internal(
         n_threads = 1;
     }
 
-    struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1];
-    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
-
-    GGML_ASSERT(strcmp(res->name,        "result_output") == 0);
-    GGML_ASSERT(strcmp(embeddings->name, "result_norm")   == 0);
-
 #if GGML_USE_MPI
     const int64_t n_layer = hparams.n_layer;
     ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
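
The HACK in the second hunk is the substance of the fix: ggml-alloc may reuse a parent tensor's buffer for result_norm and thereby change its backend to GPU, so its data never reaches host memory and llama_get_embeddings returns stale values in CUDA builds. Pinning res and embeddings to GGML_BACKEND_CPU right after graph allocation forces the compute step to copy the results back to the host. Below is a minimal sketch of a caller exercising the fixed path, assuming the llama.h C API as of this commit (llama_decode, llama_batch_get_one, llama_get_embeddings); the model path and token ids are placeholders, and error handling is elided:

#include "llama.h"

int main(void) {
    llama_backend_init(false);

    struct llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 99; // offload layers to CUDA to hit the affected path

    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    struct llama_context_params cparams = llama_context_default_params();
    cparams.embedding = true; // embedding mode: lctx.embedding is non-empty

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);

    // evaluate a small batch; with this fix, result_norm is pinned to
    // GGML_BACKEND_CPU, so the embeddings end up in host memory
    llama_token tokens[2] = { 1, 2 }; // placeholder token ids
    llama_decode(ctx, llama_batch_get_one(tokens, 2, 0, 0));

    const float * emb = llama_get_embeddings(ctx); // valid in CUDA builds after this fix
    (void) emb; // one vector of n_embd floats per evaluated token position

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

Before this change, the same program compiled with GGML_USE_CUBLAS could read unwritten memory through llama_get_embeddings, because the backend of result_norm changed silently during allocation.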