From 8781013ef654270cbead3e0011e33a6d690fb168 Mon Sep 17 00:00:00 2001 From: Cebtenzzre Date: Mon, 18 Sep 2023 10:03:53 -0400 Subject: [PATCH 01/26] make : restore build-info.h dependency for several targets (#3205) --- Makefile | 14 +++++++------- common/common.h | 1 - examples/benchmark/benchmark-matmult.cpp | 1 + examples/embd-input/embd-input-lib.cpp | 1 + examples/embedding/embedding.cpp | 1 + examples/perplexity/perplexity.cpp | 1 + examples/quantize-stats/quantize-stats.cpp | 1 + examples/quantize/quantize.cpp | 1 + examples/save-load-state/save-load-state.cpp | 1 + 9 files changed, 14 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index dc8ae3807..e07db8afa 100644 --- a/Makefile +++ b/Makefile @@ -514,22 +514,22 @@ main: examples/main/main.cpp build-info.h ggml. @echo '==== Run ./main -h for help. ====' @echo -simple: examples/simple/simple.cpp ggml.o llama.o common.o $(OBJS) +simple: examples/simple/simple.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -quantize: examples/quantize/quantize.cpp ggml.o llama.o $(OBJS) +quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -quantize-stats: examples/quantize-stats/quantize-stats.cpp ggml.o llama.o $(OBJS) +quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o $(OBJS) +perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o $(OBJS) +embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o common.o $(OBJS) +save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp build-info.h ggml.o llama.o common.o grammar-parser.o $(OBJS) @@ -582,7 +582,7 @@ build-info.h: $(wildcard .git/index) scripts/build-info.sh tests: $(TEST_TARGETS) -benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o $(OBJS) +benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) ./$@ diff --git a/common/common.h b/common/common.h index f9dfd4a2c..18aea38ce 100644 --- a/common/common.h +++ b/common/common.h @@ -3,7 +3,6 @@ #pragma once #include "llama.h" -#include "build-info.h" #define LOG_NO_FILE_LINE_FUNCTION #include "log.h" diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index 561309acb..b16ad24d3 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -1,3 +1,4 @@ +#include "build-info.h" #include "common.h" #include "ggml.h" diff --git a/examples/embd-input/embd-input-lib.cpp b/examples/embd-input/embd-input-lib.cpp index fc6e44eb2..c995eef35 100644 --- a/examples/embd-input/embd-input-lib.cpp +++ b/examples/embd-input/embd-input-lib.cpp @@ -1,3 +1,4 
@@ +#include "build-info.h" #include "common.h" #include "embd-input.h" diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 0788f362c..27d605f4e 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -1,3 +1,4 @@ +#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 4958cdfb9..2b375e34e 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1,3 +1,4 @@ +#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index 9f930dede..94edb94d9 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -1,4 +1,5 @@ #define LLAMA_API_INTERNAL +#include "build-info.h" #include "common.h" #include "ggml.h" #include "llama.h" diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index acb79e690..1c1d957e6 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -1,3 +1,4 @@ +#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index eac307904..95527bb86 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -1,3 +1,4 @@ +#include "build-info.h" #include "common.h" #include "llama.h" From d119c04c159d015a93567df7e73e0e45a22d0f1d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 20 Sep 2023 10:02:39 +0300 Subject: [PATCH 02/26] examples : fix benchmark-matmult (#1554) The precision for Q4_0 has degraded since #1508 --- examples/benchmark/benchmark-matmult.cpp | 28 +++++++++++++----------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index b16ad24d3..c8f7d4869 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -33,11 +33,11 @@ void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, } float tensor_sum_elements(const ggml_tensor * tensor) { - float sum = 0; - if (tensor->type==GGML_TYPE_F32) { + double sum = 0; + if (tensor->type == GGML_TYPE_F32) { for (int j = 0; j < tensor->ne[1]; j++) { for (int k = 0; k < tensor->ne[0]; k++) { - sum += ((float *) tensor->data)[j*tensor->ne[0]+k]; + sum += ((float *) tensor->data)[j*tensor->ne[0] + k]; } } } @@ -126,12 +126,15 @@ int main(int argc, char ** argv) { //printf("Memsize required = %i\n", sizex*sizex); + // TODO: perform the bench for all types or for a user specified type + const ggml_type qtype = GGML_TYPE_Q4_1; + size_t ctx_size = 0; ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32); - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0); - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0); + ctx_size += sizex*sizey*ggml_type_sizef(qtype); + ctx_size += sizex*sizey*ggml_type_sizef(qtype); ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS ctx_size += 1024*1024*16; @@ -164,7 +167,7 @@ int main(int argc, char ** argv) { struct ggml_tensor * m2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizez); 
ggml_set_f32(m2, 2.0f); - printf("\n------ Test 1 - Matrix Mult via F32 code ------------------------------------------------------------------------------\n"); + printf("\n------ Test 1 - Matrix Mult via F32 code\n"); // printf("Creating new tensor m11xm2\n"); struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2); @@ -182,17 +185,16 @@ int main(int argc, char ** argv) { TENSOR_DUMP(gf.nodes[0]); - printf("\n------ Test 2 - Matrix Mult via Q4_0 code ------------------------------------------------------------------------------\n"); + printf("\n------ Test 2 - Matrix Mult via %s code\n", ggml_type_name(qtype)); int32_t nelements = sizex*sizey; - int32_t ne[2] = { sizex, sizey }; std::vector hist_cur(1 << 4, 0); // Set up a the benchmark matrices // printf("Creating new tensor q11 & Running quantize\n"); - struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey); - ggml_quantize_q4_0((const float *) m11->data, q11->data, nelements, ne[0], hist_cur.data()); + struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); + ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements, hist_cur.data()); // Set up a the compute graph // printf("Creating new tensor q31\n"); @@ -203,8 +205,8 @@ int main(int argc, char ** argv) { // Set up a second graph computation to make sure we override the CPU cache lines // printf("Creating new tensor q12 & Running quantize\n"); - struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey); - ggml_quantize_q4_0((const float *) m12->data, q12->data, nelements, ne[0], hist_cur.data()); + struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); + ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements, hist_cur.data()); // printf("Creating new tensor q32\n"); struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2); @@ -221,7 +223,7 @@ int main(int argc, char ** argv) { printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000); - // Let's use the F32 result from above as a reference for the q4_0 multiplication + // Let's use the F32 result from above as a reference for the quantized multiplication float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]); printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n"); From a40f2b656fab364ce0aff98dbefe9bd9c3721cc9 Mon Sep 17 00:00:00 2001 From: Alon Date: Wed, 20 Sep 2023 15:06:36 +0300 Subject: [PATCH 03/26] CI: FreeBSD fix (#3258) * - freebsd ci: use qemu --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4b6071f5a..aecebaf93 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -468,6 +468,7 @@ jobs: with: operating_system: freebsd version: '13.2' + hypervisor: 'qemu' run: | sudo pkg update sudo pkg install -y gmake automake autoconf pkgconf llvm15 clinfo clover opencl clblast openblas From 80834daecf4b9021770361a6d5e1b9c7a60e6854 Mon Sep 17 00:00:00 2001 From: kang Date: Wed, 20 Sep 2023 22:48:22 +0900 Subject: [PATCH 04/26] flake : Restore default package's buildInputs (#3262) --- flake.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index b0fb8642c..7723357af 100644 --- a/flake.nix +++ b/flake.nix @@ -52,7 +52,8 @@ in { packages.default = pkgs.stdenv.mkDerivation { - inherit name src meta 
postPatch nativeBuildInputs buildInputs postInstall; + inherit name src meta postPatch nativeBuildInputs postInstall; + buildInputs = osSpecific; cmakeFlags = cmakeFlags ++ (if isAarch64 && isDarwin then [ "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1" From 65c2c1c5ab7c5089dbc6d10bc49b9c58f0164317 Mon Sep 17 00:00:00 2001 From: Cebtenzzre Date: Wed, 20 Sep 2023 12:06:08 -0400 Subject: [PATCH 05/26] benchmark-matmult : do not use integer abs() on a float (#3277) --- examples/benchmark/benchmark-matmult.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index c8f7d4869..f1c382aa9 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -21,7 +21,7 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { +static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); if (plan.work_size > 0) { @@ -32,7 +32,7 @@ void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, ggml_graph_compute(graph, &plan); } -float tensor_sum_elements(const ggml_tensor * tensor) { +static float tensor_sum_elements(const ggml_tensor * tensor) { double sum = 0; if (tensor->type == GGML_TYPE_F32) { for (int j = 0; j < tensor->ne[1]; j++) { @@ -44,7 +44,7 @@ float tensor_sum_elements(const ggml_tensor * tensor) { return sum; } -void tensor_dump(const ggml_tensor * tensor, const char * name) { +static void tensor_dump(const ggml_tensor * tensor, const char * name) { printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name, tensor->type, ggml_type_name(tensor->type), tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]); @@ -59,7 +59,7 @@ struct benchmark_params_struct { int32_t n_iterations = 10; }; -void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) { +static void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); @@ -253,7 +253,7 @@ int main(int argc, char ** argv) { // Check that the matrix multiplication result is in the right ballpark // We cannot use the exact value from the F32 multiplication because the quantizuation will be slightly different float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]); - float delta = abs(sum_of_Q4_result - sum_of_F32_reference); + float delta = std::abs(sum_of_Q4_result - sum_of_F32_reference); float allowed_delta = (sum_of_F32_reference) / 1000 / 1000; // Let's accept an epsilon of 10^-6 if (delta > allowed_delta) { From a5661d7e71d15b8dfc81bc0510ba912ebe85dfa3 Mon Sep 17 00:00:00 2001 From: Cebtenzzre Date: Wed, 20 Sep 2023 12:12:47 -0400 Subject: [PATCH 06/26] llama : allow gguf RoPE keys to be overridden with defaults (#3240) --- common/common.cpp | 6 ++-- examples/server/server.cpp | 4 +-- llama.cpp | 56 +++++++++++++++----------------------- 3 files changed, 27 insertions(+), 39 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 6d655fd55..2597ba06a 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -647,9 +647,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --cfg-negative-prompt-file 
FNAME\n"); printf(" negative prompt file to use for guidance. (default: empty)\n"); printf(" --cfg-scale N strength of guidance (default: %f, 1.0 = disable)\n", params.cfg_scale); - printf(" --rope-scale N RoPE context linear scaling factor, inverse of --rope-freq-scale (default: %g)\n", 1.0f/params.rope_freq_scale); - printf(" --rope-freq-base N RoPE base frequency, used by NTK-aware scaling (default: %.1f)\n", params.rope_freq_base); - printf(" --rope-freq-scale N RoPE frequency linear scaling factor, inverse of --rope-scale (default: %g)\n", params.rope_freq_scale); + printf(" --rope-scale N RoPE context linear scaling factor, inverse of --rope-freq-scale\n"); + printf(" --rope-freq-base N RoPE base frequency, used by NTK-aware scaling (default: loaded from model)\n"); + printf(" --rope-freq-scale N RoPE frequency linear scaling factor (default: loaded from model)\n"); printf(" --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n"); printf(" --no-penalize-nl do not penalize newline token\n"); printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 1bb8e92c0..ebd7f2fc5 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -701,8 +701,8 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled"); printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); - printf(" --rope-freq-base N RoPE base frequency (default: %.1f)\n", params.rope_freq_base); - printf(" --rope-freq-scale N RoPE frequency scaling factor (default: %g)\n", params.rope_freq_scale); + printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n"); + printf(" --rope-freq-scale N RoPE frequency scaling factor (default: loaded from model)\n"); printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); printf(" not recommended: doubles context memory required and no measurable increase in quality\n"); diff --git a/llama.cpp b/llama.cpp index 79b48897d..358bf5ec8 100644 --- a/llama.cpp +++ b/llama.cpp @@ -929,23 +929,22 @@ static const size_t kB = 1024; static const size_t MB = kB*kB; static const size_t GB = kB*kB*kB; -// default hparams (LLaMA 7B) struct llama_hparams { - uint32_t n_vocab = 32000; - uint32_t n_ctx_train = 2048; // the context size used during training - uint32_t n_ctx = 512; // the context size used during inference - uint32_t n_embd = 4096; - uint32_t n_head = 32; - uint32_t n_head_kv = 32; - uint32_t n_layer = 32; - uint32_t n_rot = 64; - uint32_t n_ff = 11008; + uint32_t n_vocab; + uint32_t n_ctx_train; // context size the model was trained on + uint32_t n_ctx; // context size used during inference + uint32_t n_embd; + uint32_t n_head; + uint32_t n_head_kv; + uint32_t n_layer; + uint32_t n_rot; + uint32_t n_ff; - float f_norm_eps = 1e-5; - float f_norm_rms_eps = 1e-5; + float f_norm_eps; + float f_norm_rms_eps; - float rope_freq_base = 10000.0f; - float rope_freq_scale = 1.0f; + float rope_freq_base; + float rope_freq_scale; bool operator!=(const llama_hparams & other) const { return static_cast(memcmp(this, &other, sizeof(llama_hparams))); // 
NOLINT @@ -1076,7 +1075,7 @@ struct llama_model { std::string name = "n/a"; - llama_hparams hparams; + llama_hparams hparams = {}; llama_vocab vocab; struct ggml_tensor * tok_embeddings; @@ -1674,28 +1673,17 @@ static void llm_load_hparams( hparams.n_head_kv = hparams.n_head; GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV)); - // TODO: manually setting rope freq base and scale should override this - // FIXME: partial fix when the param specified is not the default value, but - // will not work for overriding the model value to the params default - - llama_context_params defaults = llama_context_default_params(); - - // rope_freq_base - { - float ropebase = 10000.0f; - GGUF_GET_KEY(ctx, ropebase, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE)); - if (ropebase != 10000.0f && rope_freq_base == defaults.rope_freq_base) { - rope_freq_base = ropebase; - } + // rope_freq_base (optional) + if (rope_freq_base == 0.0f) { + rope_freq_base = 10000.0f; + GGUF_GET_KEY(ctx, rope_freq_base, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE)); } // rope_freq_scale (inverse of the kv) is optional - { + if (rope_freq_scale == 0.0f) { float ropescale = 1.0f; GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR)); - if (ropescale != 1.0f && rope_freq_scale == defaults.rope_freq_scale) { - rope_freq_scale = 1.0f/ropescale; - } + rope_freq_scale = 1.0f/ropescale; } // sanity check for n_rot (optional) @@ -6188,8 +6176,8 @@ struct llama_context_params llama_context_default_params() { /*.n_gpu_layers =*/ 0, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, - /*.rope_freq_base =*/ 10000.0f, - /*.rope_freq_scale =*/ 1.0f, + /*.rope_freq_base =*/ 0.0f, + /*.rope_freq_scale =*/ 0.0f, /*.progress_callback =*/ nullptr, /*.progress_callback_user_data =*/ nullptr, /*.low_vram =*/ false, From 7eb41179edc56083ef4eb2df7967ac9ff38b34fb Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 20 Sep 2023 20:48:22 +0300 Subject: [PATCH 07/26] readme : update hot topics --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index d8fd8bc44..670e2e673 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,8 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ ### Hot topics +- Parallel decoding + continuous batching support incoming: [#3228](https://github.com/ggerganov/llama.cpp/pull/3228) \ + **Devs should become familiar with the new API** - Local Falcon 180B inference on Mac Studio https://github.com/ggerganov/llama.cpp/assets/1991296/98abd4e8-7077-464c-ae89-aebabca7757e From 8185710a80531e9ee0c0cb99d3a9c9af1019ab67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Thu, 21 Sep 2023 10:43:53 +0200 Subject: [PATCH 08/26] CUDA: use only 1 thread if fully offloaded (#2915) --- llama.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/llama.cpp b/llama.cpp index 358bf5ec8..346636501 100644 --- a/llama.cpp +++ b/llama.cpp @@ -3765,6 +3765,15 @@ static bool llama_eval_internal( n_threads = std::min(4, n_threads); } + // If all tensors can be run on the GPU then using more than 1 thread is detrimental. 
+ const bool full_offload_supported = model.arch == LLM_ARCH_LLAMA || + model.arch == LLM_ARCH_BAICHUAN || + model.arch == LLM_ARCH_FALCON; + const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3; + if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) { + n_threads = 1; + } + struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; From f56c418ab0a635c020bcb5bf44b8f00cb3c9e514 Mon Sep 17 00:00:00 2001 From: yuiseki Date: Thu, 21 Sep 2023 17:57:40 +0900 Subject: [PATCH 09/26] embedding : update README.md (#3224) --- examples/embedding/README.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/examples/embedding/README.md b/examples/embedding/README.md index fe8f5dcc6..6929454c5 100644 --- a/examples/embedding/README.md +++ b/examples/embedding/README.md @@ -1,3 +1,21 @@ -# embedding +# llama.cpp/example/embedding -TODO +This example demonstrates generate high-dimensional embedding vector of a given text with llama.cpp. + +## Quick Start + +To get started right away, run the following command, making sure to use the correct path for the model you have: + +### Unix-based systems (Linux, macOS, etc.): + +```bash +./embedding -m ./path/to/model --log-disable -p "Hello World!" 2>/dev/null +``` + +### Windows: + +```powershell +embedding.exe -m ./path/to/model --log-disable -p "Hello World!" 2>$null +``` + +The above command will output space-separated float values. From 324f3403d54ae4499a1d68623161015f7419fb76 Mon Sep 17 00:00:00 2001 From: Edward Taylor Date: Thu, 21 Sep 2023 21:08:20 +1200 Subject: [PATCH 10/26] zig : fix for updated c lib (#3259) --- build.zig | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/build.zig b/build.zig index f2769ba8c..3a8978bc3 100644 --- a/build.zig +++ b/build.zig @@ -36,17 +36,20 @@ const Maker = struct { } fn init(builder: *std.build.Builder) !Maker { - const commit_hash = @embedFile(".git/refs/heads/master"); + // const commit_hash = @embedFile(".git/refs/heads/master"); + const target = builder.standardTargetOptions(.{}); const config_header = builder.addConfigHeader( .{ .style = .blank, .include_path = "build-info.h" }, .{ .BUILD_NUMBER = 0, - .BUILD_COMMIT = commit_hash[0 .. 
commit_hash.len - 1], // omit newline + .BUILD_COMMIT = "12345", // omit newline + .BUILD_COMPILER = "Zig 0.11.0", + .BUILD_TARGET = try target.allocDescription(builder.allocator), }, ); var m = Maker{ .builder = builder, - .target = builder.standardTargetOptions(.{}), + .target = target, .optimize = builder.standardOptimizeOption(.{}), .config_header = config_header, .enable_lto = false, @@ -58,7 +61,7 @@ const Maker = struct { try m.addCFlag("-std=c11"); try m.addCxxFlag("-std=c++11"); try m.addProjectInclude(&.{}); - try m.addProjectInclude(&.{"examples"}); + try m.addProjectInclude(&.{"common"}); return m; } @@ -71,6 +74,7 @@ const Maker = struct { o.addCSourceFiles(&.{src}, m.cxxflags.items); o.linkLibCpp(); } + o.addConfigHeader(m.config_header); for (m.include_dirs.items) |i| o.addIncludePath(.{ .path = i }); o.want_lto = m.enable_lto; return o; @@ -104,15 +108,15 @@ pub fn build(b: *std.build.Builder) !void { const ggml = make.obj("ggml", "ggml.c"); const ggml_alloc = make.obj("ggml-alloc", "ggml-alloc.c"); const llama = make.obj("llama", "llama.cpp"); - const common = make.obj("common", "examples/common.cpp"); - const console = make.obj("common", "examples/console.cpp"); - const grammar_parser = make.obj("grammar-parser", "examples/grammar-parser.cpp"); + const common = make.obj("common", "common/common.cpp"); + const console = make.obj("common", "common/console.cpp"); + const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp"); _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, llama, common, console, grammar_parser }); - _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, llama }); + _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, llama, common }); _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, llama, common }); _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, llama, common }); - _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, llama }); + _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, llama, common }); const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, llama, common, grammar_parser }); if (server.target.isWindows()) { From 36b904e20003017f50108ae68359ef87a192dae2 Mon Sep 17 00:00:00 2001 From: shibe2 Date: Thu, 21 Sep 2023 22:10:26 +0400 Subject: [PATCH 11/26] ggml-opencl.cpp: Make private functions static (#3300) --- ggml-opencl.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp index 777048d01..c7d9150fe 100644 --- a/ggml-opencl.cpp +++ b/ggml-opencl.cpp @@ -847,7 +847,7 @@ std::array mul_str_values = { "mul_f32", "float" }; -std::string& replace(std::string& s, const std::string& from, const std::string& to) { +static std::string& replace(std::string& s, const std::string& from, const std::string& to) { size_t pos = 0; while ((pos = s.find(from, pos)) != std::string::npos) { s.replace(pos, from.length(), to); @@ -856,7 +856,7 @@ std::string& replace(std::string& s, const std::string& from, const std::string& return s; } -std::string generate_kernels() { +static std::string generate_kernels() { std::stringstream src; src << program_source << '\n'; src << k_quants_source << '\n'; @@ -1788,7 +1788,7 @@ bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, 
const struct ggml_tens return false; } -bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) { +static bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) { // If device doesn't support FP16 if (!fp16_support) { return false; From bc9d3e3971e5607a10ff4c24e39568ce1ac87271 Mon Sep 17 00:00:00 2001 From: Lee Drake Date: Thu, 21 Sep 2023 13:00:24 -0600 Subject: [PATCH 12/26] Update README.md (#3289) * Update README.md * Update README.md Co-authored-by: slaren --------- Co-authored-by: slaren --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 670e2e673..42686aacc 100644 --- a/README.md +++ b/README.md @@ -557,6 +557,10 @@ python3 convert.py models/7B/ # quantize the model to 4-bits (using q4_0 method) ./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0 +# update the gguf filetype to current if older version is unsupported by another application +./quantize ./models/7B/ggml-model-q4_0.gguf ./models/7B/ggml-model-q4_0-v2.gguf COPY + + # run the inference ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 ``` From bedb92b603886768ad51e629f81eda15ff6b86f5 Mon Sep 17 00:00:00 2001 From: Kevin Ji <1146876+kevinji@users.noreply.github.com> Date: Fri, 22 Sep 2023 23:52:23 -0400 Subject: [PATCH 13/26] scripts : use `/usr/bin/env` in shebang (#3313) --- scripts/verify-checksum-models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/verify-checksum-models.py b/scripts/verify-checksum-models.py index 307b7c08d..dff4b4734 100755 --- a/scripts/verify-checksum-models.py +++ b/scripts/verify-checksum-models.py @@ -1,4 +1,4 @@ -#!/bin/env python3 +#!/usr/bin/env python3 import os import hashlib From 51a7cf5c6e490b2f51c82daa76c4ca4f8d845826 Mon Sep 17 00:00:00 2001 From: Cebtenzzre Date: Sat, 23 Sep 2023 05:28:50 -0400 Subject: [PATCH 14/26] examples : fix RoPE defaults to match PR #3240 (#3315) --- common/common.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/common.h b/common/common.h index 18aea38ce..2761503b8 100644 --- a/common/common.h +++ b/common/common.h @@ -48,8 +48,8 @@ struct gpt_params { float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens. int32_t n_beams = 0; // if non-zero then use beam search of given width. - float rope_freq_base = 10000.0f; // RoPE base frequency - float rope_freq_scale = 1.0f; // RoPE frequency scaling factor + float rope_freq_base = 0.0f; // RoPE base frequency + float rope_freq_scale = 0.0f; // RoPE frequency scaling factor // sampling parameters int32_t top_k = 40; // <= 0 to use vocab size From c091cdfb24621710c617ea85c92fcd347d0bf340 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 23 Sep 2023 21:48:24 +0200 Subject: [PATCH 15/26] llama-bench : add README (#3317) * llama-bench : add README * minor edit --- examples/llama-bench/README.md | 271 +++++++++++++++++++++++++++++++++ 1 file changed, 271 insertions(+) create mode 100644 examples/llama-bench/README.md diff --git a/examples/llama-bench/README.md b/examples/llama-bench/README.md new file mode 100644 index 000000000..d02824bfa --- /dev/null +++ b/examples/llama-bench/README.md @@ -0,0 +1,271 @@ +# llama.cpp/example/llama-bench + +Performance testing tool for llama.cpp. + +## Table of contents + +1. 
[Syntax](#syntax) +2. [Examples](#examples) + 1. [Text generation with different models](#text-generation-with-different-models) + 2. [Prompt processing with different batch sizes](#prompt-processing-with-different-batch-sizes) + 3. [Different numbers of threads](#different-numbers-of-threads) + 4. [Different numbers of layers offloaded to the GPU](#different-numbers-of-layers-offloaded-to-the-gpu) +3. [Output formats](#output-formats) + 1. [Markdown](#markdown) + 2. [CSV](#csv) + 3. [JSON](#json) + 4. [SQL](#sql) + +## Syntax + +``` +usage: ./llama-bench [options] + +options: + -h, --help + -m, --model (default: models/7B/ggml-model-q4_0.gguf) + -p, --n-prompt (default: 512) + -n, --n-gen (default: 128) + -b, --batch-size (default: 512) + --memory-f32 <0|1> (default: 0) + -t, --threads (default: 16) + -ngl N, --n-gpu-layers (default: 99) + -mg i, --main-gpu (default: 0) + -mmq, --mul-mat-q <0|1> (default: 1) + -ts, --tensor_split + -r, --repetitions (default: 5) + -o, --output (default: md) + -v, --verbose (default: 0) + +Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times. +``` + +llama-bench can perform two types of tests: + +- Prompt processing (pp): processing a prompt in batches (`-p`) +- Text generation (tg): generating a sequence of tokens (`-n`) + +With the exception of `-r`, `-o` and `-v`, all options can be specified multiple times to run multiple tests. Each pp and tg test is run with all combinations of the specified options. To specify multiple values for an option, the values can be separated by commas (e.g. `-n 16,32`), or the option can be specified multiple times (e.g. `-n 16 -n 32`). + +Each test is repeated the number of times given by `-r`, and the results are averaged. The results are given in average tokens per second (t/s) and standard deviation. Some output formats (e.g. json) also include the individual results of each repetition. + +For a description of the other options, see the [main example](../main/README.md). 
+ +## Examples + +### Text generation with different models + +```sh +$ ./llama-bench -m models/7B/ggml-model-q4_0.gguf -m models/13B/ggml-model-q4_0.gguf -p 0 -n 128,256,512 +``` + +| model | size | params | backend | ngl | test | t/s | +| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 132.19 ± 0.55 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 256 | 129.37 ± 0.54 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 512 | 123.83 ± 0.25 | +| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 128 | 82.17 ± 0.31 | +| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 256 | 80.74 ± 0.23 | +| llama 13B mostly Q4_0 | 6.86 GiB | 13.02 B | CUDA | 99 | tg 512 | 78.08 ± 0.07 | + +### Prompt processing with different batch sizes + +```sh +$ ./llama-bench -n 0 -p 1024 -b 128,256,512,1024 +``` + +| model | size | params | backend | ngl | n_batch | test | t/s | +| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------: | ---------- | ---------------: | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 128 | pp 1024 | 1436.51 ± 3.66 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 256 | pp 1024 | 1932.43 ± 23.48 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 512 | pp 1024 | 2254.45 ± 15.59 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | 1024 | pp 1024 | 2498.61 ± 13.58 | + +### Different numbers of threads + +```sh +$ ./llama-bench -n 0 -n 16 -p 64 -t 1,2,4,8,16,32 +``` + +| model | size | params | backend | threads | test | t/s | +| ------------------------------ | ---------: | ---------: | ---------- | ---------: | ---------- | ---------------: | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | pp 64 | 6.17 ± 0.07 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 1 | tg 16 | 4.05 ± 0.02 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | pp 64 | 12.31 ± 0.13 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 2 | tg 16 | 7.80 ± 0.07 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | pp 64 | 23.18 ± 0.06 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 4 | tg 16 | 12.22 ± 0.07 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | pp 64 | 32.29 ± 1.21 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 8 | tg 16 | 16.71 ± 0.66 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | pp 64 | 33.52 ± 0.03 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 16 | tg 16 | 15.32 ± 0.05 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | pp 64 | 59.00 ± 1.11 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CPU | 32 | tg 16 | 16.41 ± 0.79 || + +### Different numbers of layers offloaded to the GPU + +```sh +$ ./llama-bench -ngl 10,20,30,31,32,33,34,35 +``` + +| model | size | params | backend | ngl | test | t/s | +| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | pp 512 | 373.36 ± 2.25 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 10 | tg 128 | 13.45 ± 0.93 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | pp 512 | 472.65 ± 1.25 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 20 | tg 128 | 21.36 ± 1.94 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | pp 512 | 631.87 ± 11.25 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 30 | tg 128 | 40.04 ± 1.82 | +| llama 
7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | pp 512 | 657.89 ± 5.08 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 31 | tg 128 | 48.19 ± 0.81 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | pp 512 | 688.26 ± 3.29 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 32 | tg 128 | 54.78 ± 0.65 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | pp 512 | 704.27 ± 2.24 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 33 | tg 128 | 60.62 ± 1.76 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | pp 512 | 881.34 ± 5.40 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 34 | tg 128 | 71.76 ± 0.23 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | pp 512 | 2400.01 ± 7.72 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 35 | tg 128 | 131.66 ± 0.49 | + +## Output formats + +By default, llama-bench outputs the results in markdown format. The results can be output in other formats by using the `-o` option. + +### Markdown + +```sh +$ ./llama-bench -o md +``` + +| model | size | params | backend | ngl | test | t/s | +| ------------------------------ | ---------: | ---------: | ---------- | --: | ---------- | ---------------: | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | pp 512 | 2368.80 ± 93.24 | +| llama 7B mostly Q4_0 | 3.56 GiB | 6.74 B | CUDA | 99 | tg 128 | 131.42 ± 0.59 | + +### CSV + +```sh +$ ./llama-bench -o csv +``` + +```csv +build_commit,build_number,cuda,opencl,metal,gpu_blas,blas,cpu_info,gpu_info,model_filename,model_type,model_size,model_n_params,n_batch,n_threads,f16_kv,n_gpu_layers,main_gpu,mul_mat_q,tensor_split,n_prompt,n_gen,test_time,avg_ns,stddev_ns,avg_ts,stddev_ts +"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","512","0","2023-09-23T12:09:01Z","212155977","732372","2413.341687","8.305961" +"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","0","128","2023-09-23T12:09:02Z","969320879","2728399","132.052051","0.371342" +``` + +### JSON + +```sh +$ ./llama-bench -o json +``` + +```json +[ + { + "build_commit": "3469684", + "build_number": 1275, + "cuda": true, + "opencl": false, + "metal": false, + "gpu_blas": true, + "blas": true, + "cpu_info": "13th Gen Intel(R) Core(TM) i9-13900K", + "gpu_info": "NVIDIA GeForce RTX 3090 Ti", + "model_filename": "models/7B/ggml-model-q4_0.gguf", + "model_type": "llama 7B mostly Q4_0", + "model_size": 3825065984, + "model_n_params": 6738415616, + "n_batch": 512, + "n_threads": 16, + "f16_kv": true, + "n_gpu_layers": 99, + "main_gpu": 0, + "mul_mat_q": true, + "tensor_split": "0.00", + "n_prompt": 512, + "n_gen": 0, + "test_time": "2023-09-23T12:09:57Z", + "avg_ns": 212365953, + "stddev_ns": 985423, + "avg_ts": 2410.974041, + "stddev_ts": 11.163766, + "samples_ns": [ 213837238, 211635853, 212328053, 211329715, 212698907 ], + "samples_ts": [ 2394.34, 2419.25, 2411.36, 2422.75, 2407.16 ] + }, + { + "build_commit": "3469684", + "build_number": 1275, + "cuda": true, + "opencl": false, + "metal": false, + "gpu_blas": true, + "blas": true, + "cpu_info": "13th Gen Intel(R) Core(TM) i9-13900K", + "gpu_info": "NVIDIA GeForce RTX 3090 Ti", + "model_filename": "models/7B/ggml-model-q4_0.gguf", + "model_type": "llama 7B mostly Q4_0", + 
"model_size": 3825065984, + "model_n_params": 6738415616, + "n_batch": 512, + "n_threads": 16, + "f16_kv": true, + "n_gpu_layers": 99, + "main_gpu": 0, + "mul_mat_q": true, + "tensor_split": "0.00", + "n_prompt": 0, + "n_gen": 128, + "test_time": "2023-09-23T12:09:59Z", + "avg_ns": 977425219, + "stddev_ns": 9268593, + "avg_ts": 130.965708, + "stddev_ts": 1.238924, + "samples_ns": [ 984472709, 974901233, 989474741, 970729355, 967548060 ], + "samples_ts": [ 130.019, 131.295, 129.362, 131.86, 132.293 ] + } +] +``` + +### SQL + +SQL output is suitable for importing into a SQLite database. The output can be piped into the `sqlite3` command line tool to add the results to a database. + +```sh +$ ./llama-bench -o sql +``` + +```sql +CREATE TABLE IF NOT EXISTS test ( + build_commit TEXT, + build_number INTEGER, + cuda INTEGER, + opencl INTEGER, + metal INTEGER, + gpu_blas INTEGER, + blas INTEGER, + cpu_info TEXT, + gpu_info TEXT, + model_filename TEXT, + model_type TEXT, + model_size INTEGER, + model_n_params INTEGER, + n_batch INTEGER, + n_threads INTEGER, + f16_kv INTEGER, + n_gpu_layers INTEGER, + main_gpu INTEGER, + mul_mat_q INTEGER, + tensor_split TEXT, + n_prompt INTEGER, + n_gen INTEGER, + test_time TEXT, + avg_ns INTEGER, + stddev_ns INTEGER, + avg_ts REAL, + stddev_ts REAL +); + +INSERT INTO test (build_commit, build_number, cuda, opencl, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '512', '0', '2023-09-23T12:10:30Z', '212693772', '743623', '2407.240204', '8.409634'); +INSERT INTO test (build_commit, build_number, cuda, opencl, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '0', '128', '2023-09-23T12:10:31Z', '977925003', '4037361', '130.891159', '0.537692'); +``` From a98b1633d5a94d0aa84c7c16e1f8df5ac21fc850 Mon Sep 17 00:00:00 2001 From: Erik Scholz Date: Mon, 25 Sep 2023 13:48:30 +0200 Subject: [PATCH 16/26] nix : add cuda, use a symlinked toolkit for cmake (#3202) --- flake.nix | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/flake.nix b/flake.nix index 7723357af..433d3d942 100644 --- a/flake.nix +++ b/flake.nix @@ -35,6 +35,20 @@ ); pkgs = import nixpkgs { inherit system; }; nativeBuildInputs = with pkgs; [ cmake ninja pkg-config ]; + cudatoolkit_joined = with pkgs; symlinkJoin { + # HACK(Green-Sky): nix currently has issues with cmake findcudatoolkit + # see https://github.com/NixOS/nixpkgs/issues/224291 + # copied from jaxlib + name = "${cudaPackages.cudatoolkit.name}-merged"; + paths = [ + cudaPackages.cudatoolkit.lib + cudaPackages.cudatoolkit.out + ] ++ lib.optionals (lib.versionOlder cudaPackages.cudatoolkit.version "11") [ + # for some reason some of the required libs are in 
the targets/x86_64-linux + # directory; not sure why but this works around it + "${cudaPackages.cudatoolkit}/targets/${system}" + ]; + }; llama-python = pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]); postPatch = '' @@ -70,6 +84,13 @@ "-DLLAMA_CLBLAST=ON" ]; }; + packages.cuda = pkgs.stdenv.mkDerivation { + inherit name src meta postPatch nativeBuildInputs postInstall; + buildInputs = with pkgs; buildInputs ++ [ cudatoolkit_joined ]; + cmakeFlags = cmakeFlags ++ [ + "-DLLAMA_CUBLAS=ON" + ]; + }; packages.rocm = pkgs.stdenv.mkDerivation { inherit name src meta postPatch nativeBuildInputs postInstall; buildInputs = with pkgs; buildInputs ++ [ hip hipblas rocblas ]; From 1726f9626f21f102d8e01df06c23a7f94add7990 Mon Sep 17 00:00:00 2001 From: 2f38b454 Date: Tue, 26 Sep 2023 02:24:52 +0800 Subject: [PATCH 17/26] docs: Fix typo CLBlast_DIR var. (#3330) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 42686aacc..f41250147 100644 --- a/README.md +++ b/README.md @@ -501,7 +501,7 @@ Building the program with BLAS support may lead to some performance improvements ```sh mkdir build cd build - cmake .. -DLLAMA_CLBLAST=ON -DCLBlast_dir=/some/path + cmake .. -DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path cmake --build . --config Release ``` - CMake (Windows): From 99115f3fa654b593099c6719ad30e3f54ce231e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?DAN=E2=84=A2?= Date: Mon, 25 Sep 2023 18:45:33 -0400 Subject: [PATCH 18/26] cmake : fix build-info.h on MSVC (#3309) --- CMakeLists.txt | 2 +- scripts/build-info.cmake | 51 ++++++++++++++-------------------------- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c0b93564a..47425c9c6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -118,7 +118,7 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git") add_custom_command( OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h" COMMENT "Generating build details from Git" - COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake" + COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DCMAKE_C_COMPILER_VERSION=${CMAKE_C_COMPILER_VERSION} -DCMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID} -DCMAKE_VS_PLATFORM_NAME=${CMAKE_VS_PLATFORM_NAME} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -P "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS "${GIT_DIR}/index" VERBATIM diff --git a/scripts/build-info.cmake b/scripts/build-info.cmake index e33f3349a..c86ab4379 100644 --- a/scripts/build-info.cmake +++ b/scripts/build-info.cmake @@ -8,16 +8,12 @@ set(BUILD_TARGET "unknown") # Look for git find_package(Git) if(NOT Git_FOUND) - execute_process( - COMMAND which git - OUTPUT_VARIABLE GIT_EXECUTABLE - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if(NOT GIT_EXECUTABLE STREQUAL "") + find_program(GIT_EXECUTABLE NAMES git git.exe) + if(GIT_EXECUTABLE) set(Git_FOUND TRUE) - message(STATUS "Found Git using 'which': ${GIT_EXECUTABLE}") + message(STATUS "Found Git: ${GIT_EXECUTABLE}") else() - message(WARNING "Git not found using 'find_package' or 'which'. Build info will not be accurate. Consider installing Git or ensuring it is in the PATH.") + message(WARNING "Git not found. 
Build info will not be accurate.") endif() endif() @@ -28,43 +24,32 @@ if(Git_FOUND) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE HEAD OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE GIT_HEAD_RESULT ) execute_process( COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE COUNT OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE GIT_COUNT_RESULT ) - if(GIT_HEAD_RESULT EQUAL 0 AND GIT_COUNT_RESULT EQUAL 0) - set(BUILD_COMMIT ${HEAD}) - set(BUILD_NUMBER ${COUNT}) - endif() -endif() - -if(GIT_HEAD_RESULT EQUAL 0 AND GIT_COUNT_RESULT EQUAL 0) set(BUILD_COMMIT ${HEAD}) set(BUILD_NUMBER ${COUNT}) endif() -execute_process( - COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER} - OUTPUT_VARIABLE OUT - OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE RES -) -if (RES EQUAL 0) +if(MSVC) + set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") + set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME}) +else() + execute_process( + COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER} + OUTPUT_VARIABLE OUT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) set(BUILD_COMPILER ${OUT}) -endif() - -execute_process( - COMMAND ${CMAKE_C_COMPILER} -dumpmachine - OUTPUT_VARIABLE OUT - OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE RES -) -if (RES EQUAL 0) + execute_process( + COMMAND ${CMAKE_C_COMPILER} -dumpmachine + OUTPUT_VARIABLE OUT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) set(BUILD_TARGET ${OUT}) endif() From ffe88a36a913e5792aa383f0726bdbcf632e7191 Mon Sep 17 00:00:00 2001 From: BarfingLemurs <128182951+BarfingLemurs@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:30:36 -0400 Subject: [PATCH 19/26] readme : add some recent perplexity and bpw measurements to READMES, link for k-quants (#3340) * Update README.md * Update README.md * Update README.md with k-quants bpw measurements --- README.md | 5 +++++ examples/perplexity/README.md | 18 +++++++++++++++ examples/quantize/README.md | 41 +++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) diff --git a/README.md b/README.md index f41250147..09c5b1b92 100644 --- a/README.md +++ b/README.md @@ -597,6 +597,11 @@ Several quantization methods are supported. They differ in the resulting model d | 13B | ms/tok @ 8th | - | 73 | 82 | 98 | 105 | 128 | | 13B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 | +- [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684) +- recent k-quants improvements + - [#2707](https://github.com/ggerganov/llama.cpp/pull/2707) + - [#2807](https://github.com/ggerganov/llama.cpp/pull/2807) + ### Perplexity (measuring model quality) You can use the `perplexity` example to measure perplexity over a given prompt (lower perplexity is better). 
diff --git a/examples/perplexity/README.md b/examples/perplexity/README.md index eacfb17c6..50e1af011 100644 --- a/examples/perplexity/README.md +++ b/examples/perplexity/README.md @@ -1,3 +1,21 @@ # perplexity TODO + +## Llama 2 70B Scorechart +Quantization | Model size (GiB) | Perplexity | Delta to fp16 +-- | -- | -- | -- +Q4_0 | 36.20 | 3.5550 | 3.61% +Q4_1 | 40.20 | 3.5125 | 2.37% +Q5_0 | 44.20 | 3.4744 | 1.26% +Q2_K | 27.27 | 3.7339 | 8.82% +Q3_K_S | 27.86 | 3.7019 | 7.89% +Q3_K_M | 30.83 | 3.5932 | 4.72% +Q3_K_L | 33.67 | 3.5617 | 3.80% +Q4_K_S | 36.39 | 3.4852 | 1.57% +Q4_K_M | 38.54 | 3.4725 | 1.20% +Q5_K_S | 44.20 | 3.4483 | 0.50% +Q5_K_M | 45.41 | 3.4451 | 0.40% +Q6_K | 52.70 | 3.4367 | 0.16% +fp16 | 128.5 | 3.4313 | - + diff --git a/examples/quantize/README.md b/examples/quantize/README.md index f349e913e..c8b9a27a0 100644 --- a/examples/quantize/README.md +++ b/examples/quantize/README.md @@ -1,3 +1,44 @@ # quantize TODO + +## Llama 2 7B + +Quantization | Bits per Weight (BPW) +-- | -- +Q2_K | 3.35 +Q3_K_S | 3.50 +Q3_K_M | 3.91 +Q3_K_L | 4.27 +Q4_K_S | 4.58 +Q4_K_M | 4.84 +Q5_K_S | 5.52 +Q5_K_M | 5.68 +Q6_K | 6.56 + +## Llama 2 13B +Quantization | Bits per Weight (BPW) +-- | -- +Q2_K | 3.34 +Q3_K_S | 3.48 +Q3_K_M | 3.89 +Q3_K_L | 4.26 +Q4_K_S | 4.56 +Q4_K_M | 4.83 +Q5_K_S | 5.51 +Q5_K_M | 5.67 +Q6_K | 6.56 + +# Llama 2 70B + +Quantization | Bits per Weight (BPW) +-- | -- +Q2_K | 3.40 +Q3_K_S | 3.47 +Q3_K_M | 3.85 +Q3_K_L | 4.19 +Q4_K_S | 4.53 +Q4_K_M | 4.80 +Q5_K_S | 5.50 +Q5_K_M | 5.65 +Q6_K | 6.56 From 527e57cfd8a9a26bf622c0510c21c2508a24be26 Mon Sep 17 00:00:00 2001 From: Jag Chadha Date: Wed, 27 Sep 2023 11:34:32 -0400 Subject: [PATCH 20/26] build : add ACCELERATE_NEW_LAPACK to fix warning on macOS Sonoma (#3342) --- CMakeLists.txt | 2 ++ Makefile | 2 ++ Package.swift | 2 ++ 3 files changed, 6 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 47425c9c6..c4a649a97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -162,6 +162,8 @@ if (APPLE AND LLAMA_ACCELERATE) message(STATUS "Accelerate framework found") add_compile_definitions(GGML_USE_ACCELERATE) + add_compile_definitions(ACCELERATE_NEW_LAPACK) + add_compile_definitions(ACCELERATE_LAPACK_ILP64) set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK}) else() message(WARNING "Accelerate framework not found") diff --git a/Makefile b/Makefile index e07db8afa..f170f2293 100644 --- a/Makefile +++ b/Makefile @@ -305,6 +305,8 @@ ifndef LLAMA_NO_ACCELERATE # `-framework Accelerate` works both with Apple Silicon and Mac Intel ifeq ($(UNAME_S),Darwin) MK_CPPFLAGS += -DGGML_USE_ACCELERATE + MK_CPPFLAGS += -DACCELERATE_NEW_LAPACK + MK_CPPFLAGS += -DACCELERATE_LAPACK_ILP64 MK_LDFLAGS += -framework Accelerate endif endif # LLAMA_NO_ACCELERATE diff --git a/Package.swift b/Package.swift index fb95ef7eb..442463ba3 100644 --- a/Package.swift +++ b/Package.swift @@ -45,6 +45,8 @@ let package = Package( .unsafeFlags(["-Wno-shorten-64-to-32"]), .define("GGML_USE_K_QUANTS"), .define("GGML_USE_ACCELERATE") + .define("ACCELERATE_NEW_LAPACK") + .define("ACCELERATE_LAPACK_ILP64") ] + additionalSettings, linkerSettings: [ .linkedFramework("Accelerate") From dc6897404e141c74cbbf8030ecfebd74e1815411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rickard=20Hallerb=C3=A4ck?= Date: Wed, 27 Sep 2023 17:48:33 +0200 Subject: [PATCH 21/26] metal : reusing llama.cpp logging (#3152) * metal : reusing llama.cpp logging * cmake : build fix * metal : logging callback * metal : logging va_args memory fix * metal : minor cleanup * metal : 
setting function like logging macro to capital letters * llama.cpp : trailing whitespace fix * ggml : log level enum used by llama * Makefile : cleanup ggml-metal recipe * ggml : ggml_log_callback typedef * ggml : minor --------- Co-authored-by: Georgi Gerganov --- examples/llama-bench/llama-bench.cpp | 2 +- ggml-metal.h | 4 + ggml-metal.m | 116 +++++++++++++++++---------- ggml.h | 7 ++ llama.cpp | 21 ++--- llama.h | 15 +--- 6 files changed, 98 insertions(+), 67 deletions(-) diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 34ddfde39..2f1a1d9ff 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -903,7 +903,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) } } -static void llama_null_log_callback(enum llama_log_level level, const char * text, void * user_data) { +static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) { (void) level; (void) text; (void) user_data; diff --git a/ggml-metal.h b/ggml-metal.h index fca28d37e..790cf0bf7 100644 --- a/ggml-metal.h +++ b/ggml-metal.h @@ -19,6 +19,8 @@ #pragma once +#include "ggml.h" + #include #include @@ -33,6 +35,8 @@ struct ggml_cgraph; extern "C" { #endif +void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data); + struct ggml_metal_context; // number of command buffers to use diff --git a/ggml-metal.m b/ggml-metal.m index 1139ee311..654eb67f3 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -11,11 +11,14 @@ #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) -// TODO: temporary - reuse llama.cpp logging #ifdef GGML_METAL_NDEBUG -#define metal_printf(...) +#define GGML_METAL_LOG_INFO(...) +#define GGML_METAL_LOG_WARN(...) +#define GGML_METAL_LOG_ERROR(...) #else -#define metal_printf(...) fprintf(stderr, __VA_ARGS__) +#define GGML_METAL_LOG_INFO(...) ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__) +#define GGML_METAL_LOG_WARN(...) ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__) +#define GGML_METAL_LOG_ERROR(...) 
ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) #endif #define UNUSED(x) (void)(x) @@ -120,8 +123,37 @@ static NSString * const msl_library_source = @"see metal.metal"; @implementation GGMLMetalClass @end +ggml_log_callback ggml_metal_log_callback = NULL; +void * ggml_metal_log_user_data = NULL; + +void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) { + ggml_metal_log_callback = log_callback; + ggml_metal_log_user_data = user_data; +} + +static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ + if (ggml_metal_log_callback != NULL) { + va_list args; + va_start(args, format); + char buffer[128]; + int len = vsnprintf(buffer, 128, format, args); + if (len < 128) { + ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data); + } else { + char* buffer2 = malloc(len+1); + vsnprintf(buffer2, len+1, format, args); + buffer2[len] = 0; + ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data); + free(buffer2); + } + va_end(args); + } +} + + + struct ggml_metal_context * ggml_metal_init(int n_cb) { - metal_printf("%s: allocating\n", __func__); + GGML_METAL_LOG_INFO("%s: allocating\n", __func__); id device; NSString * s; @@ -131,14 +163,14 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { NSArray * devices = MTLCopyAllDevices(); for (device in devices) { s = [device name]; - metal_printf("%s: found device: %s\n", __func__, [s UTF8String]); + GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [s UTF8String]); } #endif // Pick and show default Metal device device = MTLCreateSystemDefaultDevice(); s = [device name]; - metal_printf("%s: picking default device: %s\n", __func__, [s UTF8String]); + GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [s UTF8String]); // Configure context struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context)); @@ -165,7 +197,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { ctx->library = [ctx->device newLibraryWithURL:libURL error:&error]; if (error) { - metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]); + GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return NULL; } } @@ -179,11 +211,11 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"]; NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; - metal_printf("%s: loading '%s'\n", __func__, [path UTF8String]); + GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path UTF8String]); NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error]; if (error) { - metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]); + GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return NULL; } @@ -195,7 +227,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error]; #endif if (error) { - metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]); + GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return NULL; } } @@ -207,11 +239,11 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { #define GGML_METAL_ADD_KERNEL(name) \ ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ ctx->pipeline_##name 
= [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ - metal_printf("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ + GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \ (int) ctx->pipeline_##name.threadExecutionWidth); \ if (error) { \ - metal_printf("%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ + GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ return NULL; \ } @@ -270,13 +302,13 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { #undef GGML_METAL_ADD_KERNEL } - metal_printf("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false"); + GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false"); #if TARGET_OS_OSX - metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); + GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (ctx->device.maxTransferRate != 0) { - metal_printf("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0); + GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0); } else { - metal_printf("%s: maxTransferRate = built-in GPU\n", __func__); + GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__); } #endif @@ -284,7 +316,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) { } void ggml_metal_free(struct ggml_metal_context * ctx) { - metal_printf("%s: deallocating\n", __func__); + GGML_METAL_LOG_INFO("%s: deallocating\n", __func__); #define GGML_METAL_DEL_KERNEL(name) \ [ctx->function_##name release]; \ [ctx->pipeline_##name release]; @@ -360,7 +392,7 @@ void * ggml_metal_host_malloc(size_t n) { void * data = NULL; const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n); if (result != 0) { - metal_printf("%s: error: posix_memalign failed\n", __func__); + GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); return NULL; } @@ -388,7 +420,7 @@ int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) { // Metal buffer based on the host memory pointer // static id ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) { - //metal_printf("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach); + //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach); const int64_t tsize = ggml_nbytes(t); @@ -400,13 +432,13 @@ static id ggml_metal_get_buffer(struct ggml_metal_context * ctx, stru if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) { *offs = (size_t) ioffs; - //metal_printf("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs); + //GGML_METAL_LOG_INFO("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs); return ctx->buffers[i].metal; } } - metal_printf("%s: error: buffer is nil\n", __func__); + GGML_METAL_LOG_ERROR("%s: 
error: buffer is nil\n", __func__); return nil; } @@ -418,7 +450,7 @@ bool ggml_metal_add_buffer( size_t size, size_t max_size) { if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) { - metal_printf("%s: too many buffers\n", __func__); + GGML_METAL_LOG_ERROR("%s: error: too many buffers\n", __func__); return false; } @@ -428,7 +460,7 @@ bool ggml_metal_add_buffer( const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data; if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) { - metal_printf("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name); + GGML_METAL_LOG_ERROR("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name); return false; } } @@ -449,11 +481,11 @@ bool ggml_metal_add_buffer( ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (ctx->buffers[ctx->n_buffers].metal == nil) { - metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0); + GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0); return false; } - metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0); + GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0); ++ctx->n_buffers; } else { @@ -473,13 +505,13 @@ bool ggml_metal_add_buffer( ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (ctx->buffers[ctx->n_buffers].metal == nil) { - metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0); + GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0); return false; } - metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i); + GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i); if (i + size_step < size) { - metal_printf("\n"); + GGML_METAL_LOG_INFO("\n"); } ++ctx->n_buffers; @@ -487,17 +519,17 @@ bool ggml_metal_add_buffer( } #if TARGET_OS_OSX - metal_printf(", (%8.2f / %8.2f)", + GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", ctx->device.currentAllocatedSize / 1024.0 / 1024.0, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) { - metal_printf(", warning: current allocated size is greater than the recommended max working set size\n"); + GGML_METAL_LOG_WARN(", warning: current allocated size is greater than the recommended max working set size\n", __func__); } else { - metal_printf("\n"); + GGML_METAL_LOG_INFO("\n"); } #else - metal_printf(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0); + GGML_METAL_LOG_INFO(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0); #endif } @@ -610,7 +642,7 @@ void ggml_metal_graph_find_concurrency( } if (ctx->concur_list_len > GGML_MAX_CONCUR) { - metal_printf("%s: too many elements for metal ctx->concur_list!\n", __func__); + GGML_METAL_LOG_WARN("%s: too many elements for metal 
ctx->concur_list!\n", __func__); } } @@ -664,7 +696,7 @@ void ggml_metal_graph_compute( continue; } - //metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op)); + //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op)); struct ggml_tensor * src0 = gf->nodes[i]->src[0]; struct ggml_tensor * src1 = gf->nodes[i]->src[1]; @@ -708,17 +740,17 @@ void ggml_metal_graph_compute( id id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil; id id_dst = dst ? ggml_metal_get_buffer(ctx, dst, &offs_dst) : nil; - //metal_printf("%s: op - %s\n", __func__, ggml_op_name(dst->op)); + //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op)); //if (src0) { - // metal_printf("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02, + // GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02, // ggml_is_contiguous(src0), src0->name); //} //if (src1) { - // metal_printf("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12, + // GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12, // ggml_is_contiguous(src1), src1->name); //} //if (dst) { - // metal_printf("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2, + // GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2, // dst->name); //} @@ -830,7 +862,7 @@ void ggml_metal_graph_compute( } break; default: { - metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); + GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); GGML_ASSERT(false); } } break; @@ -1019,7 +1051,7 @@ void ggml_metal_graph_compute( } break; default: { - metal_printf("Asserting on type %d\n",(int)src0t); + GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t); GGML_ASSERT(false && "not implemented"); } }; @@ -1261,7 +1293,7 @@ void ggml_metal_graph_compute( } break; default: { - metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); + GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); GGML_ASSERT(false); } } @@ -1286,7 +1318,7 @@ void ggml_metal_graph_compute( MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status]; if (status != MTLCommandBufferStatusCompleted) { - metal_printf("%s: command buffer %d failed with status %lu\n", __func__, i, status); + GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); GGML_ASSERT(false); } } diff --git a/ggml.h b/ggml.h index f45456876..b2251acef 100644 --- a/ggml.h +++ b/ggml.h @@ -445,6 +445,12 @@ extern "C" { GGML_OBJECT_WORK_BUFFER }; + enum ggml_log_level { + GGML_LOG_LEVEL_ERROR = 2, + GGML_LOG_LEVEL_WARN = 3, + GGML_LOG_LEVEL_INFO = 4 + }; + // ggml object struct ggml_object { size_t offs; @@ -1691,6 +1697,7 @@ extern "C" { }; typedef void (*ggml_opt_callback)(void * data, float * sched); + typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data); // optimization parameters // diff --git a/llama.cpp b/llama.cpp index 346636501..1327fde6f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -92,12 +92,12 @@ // LLAMA_ATTRIBUTE_FORMAT(2, 3) 
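For context, a minimal caller-side sketch of the unified logging interface: a handler matching the new ggml_log_callback signature, registered through llama_log_set. The handler name and the policy of dropping INFO-level messages are assumptions made purely for illustration, not part of the patch.

#include <cstdio>
#include "ggml.h"    // enum ggml_log_level, ggml_log_callback
#include "llama.h"   // llama_log_set()

// Hypothetical handler: forward warnings and errors to stderr, drop INFO chatter.
static void example_log_handler(enum ggml_log_level level, const char * text, void * user_data) {
    (void) user_data;
    if (level == GGML_LOG_LEVEL_INFO) {
        return;
    }
    fputs(text, stderr); // most messages already end with '\n'
}

int main(void) {
    llama_log_set(example_log_handler, NULL);
    // ... continue with the usual llama.cpp API calls ...
    return 0;
}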
-static void llama_log_internal (llama_log_level level, const char* format, ...); -static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); +static void llama_log_internal (ggml_log_level level, const char* format, ...); +static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data); -#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) -#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__) -#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) +#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) +#define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) +#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) // // helpers @@ -904,7 +904,7 @@ static std::string llama_token_to_str(const struct llama_context * ctx, llama_to struct llama_state { // We save the log callback globally - llama_log_callback log_callback = llama_log_callback_default; + ggml_log_callback log_callback = llama_log_callback_default; void * log_callback_user_data = nullptr; }; @@ -6366,6 +6366,7 @@ struct llama_context * llama_new_context_with_model( llama_free(ctx); return NULL; } + ggml_metal_log_set_callback(llama_log_callback_default, NULL); ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false); ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); } @@ -7199,12 +7200,12 @@ const std::vector> & llama_internal return ctx->model.tensors_by_name; } -void llama_log_set(llama_log_callback log_callback, void * user_data) { +void llama_log_set(ggml_log_callback log_callback, void * user_data) { g_state.log_callback = log_callback ? log_callback : llama_log_callback_default; g_state.log_callback_user_data = user_data; } -static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) { +static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) { va_list args_copy; va_copy(args_copy, args); char buffer[128]; @@ -7221,14 +7222,14 @@ static void llama_log_internal_v(llama_log_level level, const char * format, va_ va_end(args_copy); } -static void llama_log_internal(llama_log_level level, const char * format, ...) { +static void llama_log_internal(ggml_log_level level, const char * format, ...) { va_list args; va_start(args, format); llama_log_internal_v(level, format, args); va_end(args); } -static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) { +static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) { (void) level; (void) user_data; fputs(text, stderr); diff --git a/llama.h b/llama.h index 369be048c..350268b9a 100644 --- a/llama.h +++ b/llama.h @@ -62,12 +62,6 @@ extern "C" { typedef int llama_token; - enum llama_log_level { - LLAMA_LOG_LEVEL_ERROR = 2, - LLAMA_LOG_LEVEL_WARN = 3, - LLAMA_LOG_LEVEL_INFO = 4 - }; - enum llama_vocab_type { LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding @@ -151,13 +145,6 @@ extern "C" { bool embedding; // embedding mode only }; - // Signature for logging events - // Note that text includes the new line character at the end for most events. - // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it - // if it exists. 
- // if it exists.
- // It might not exist for progress report where '.' is output repeatedly. - typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); - // model quantization parameters typedef struct llama_model_quantize_params { int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() @@ -526,7 +513,7 @@ extern "C" { // Set callback for all future logging events. // If this is not called, or NULL is supplied, everything is output on stderr. - LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); + LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data); LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx); From 20c7e1e804690f3db58bd33eb56f8c6aa4735c63 Mon Sep 17 00:00:00 2001 From: Cebtenzzre Date: Wed, 27 Sep 2023 12:18:07 -0400 Subject: [PATCH 22/26] gguf : fix a few general keys (#3341) --- examples/gptneox-wip/falcon-main.cpp | 4 ++-- examples/gptneox-wip/gptneox-main.cpp | 4 ++-- gguf-py/gguf/gguf.py | 2 +- llama.cpp | 20 ++++++++++---------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/examples/gptneox-wip/falcon-main.cpp b/examples/gptneox-wip/falcon-main.cpp index 7f9a1620b..e9197f6b5 100644 --- a/examples/gptneox-wip/falcon-main.cpp +++ b/examples/gptneox-wip/falcon-main.cpp @@ -367,10 +367,10 @@ bool falcon_model_load(const std::string & fname, falcon_model & model, gpt2bpe_ keyidx = gguf_find_key(ggufctx, "general.architecture"); if (keyidx != -1) { printf("%s: model architecture = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } keyidx = gguf_find_key(ggufctx, "general.file_type"); - if (keyidx != -1) { printf("%s: model file type = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + if (keyidx != -1) { printf("%s: model file type = %" PRIu32 "\n", __func__, gguf_get_val_u32(ggufctx, keyidx)); } keyidx = gguf_find_key(ggufctx, "gptneox.tensor_data_layout"); if (keyidx != -1) { printf("%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } - keyidx = gguf_find_key(ggufctx, "general.source.hugginface.repository"); + keyidx = gguf_find_key(ggufctx, "general.source.huggingface.repository"); if (keyidx != -1) { printf("%s: model source HF repo = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } } diff --git a/examples/gptneox-wip/gptneox-main.cpp b/examples/gptneox-wip/gptneox-main.cpp index 55eba0cdc..b76bafaa8 100644 --- a/examples/gptneox-wip/gptneox-main.cpp +++ b/examples/gptneox-wip/gptneox-main.cpp @@ -380,10 +380,10 @@ bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt2 keyidx = gguf_find_key(ggufctx, "general.architecture"); if (keyidx != -1) { printf("%s: model architecture = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } keyidx = gguf_find_key(ggufctx, "general.file_type"); - if (keyidx != -1) { printf("%s: model file type = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + if (keyidx != -1) { printf("%s: model file type = %" PRIu32 "\n", __func__, gguf_get_val_u32(ggufctx, keyidx)); } keyidx = gguf_find_key(ggufctx, "gptneox.tensor_data_layout"); if (keyidx != -1) { printf("%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } - keyidx = gguf_find_key(ggufctx, "general.source.hugginface.repository"); + keyidx = gguf_find_key(ggufctx, "general.source.huggingface.repository"); if (keyidx != -1) { printf("%s: model source HF repo = %s\n", __func__, gguf_get_val_str(ggufctx, 
keyidx)); } } diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index e0e0dbcbb..598cf8e59 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -32,7 +32,7 @@ KEY_GENERAL_URL = "general.url" KEY_GENERAL_DESCRIPTION = "general.description" KEY_GENERAL_LICENSE = "general.license" KEY_GENERAL_SOURCE_URL = "general.source.url" -KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository" +KEY_GENERAL_SOURCE_HF_REPO = "general.source.huggingface.repository" KEY_GENERAL_FILE_TYPE = "general.file_type" # LLM diff --git a/llama.cpp b/llama.cpp index 1327fde6f..6e23a0772 100644 --- a/llama.cpp +++ b/llama.cpp @@ -221,16 +221,16 @@ enum llm_kv { }; static std::map LLM_KV_NAMES = { - { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" }, - { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" }, - { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" }, - { LLM_KV_GENERAL_NAME, "general.name" }, - { LLM_KV_GENERAL_AUTHOR, "general.author" }, - { LLM_KV_GENERAL_URL, "general.url" }, - { LLM_KV_GENERAL_DESCRIPTION, "general.description" }, - { LLM_KV_GENERAL_LICENSE, "general.license" }, - { LLM_KV_GENERAL_SOURCE_URL, "general.source_url" }, - { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source_hf_repo" }, + { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" }, + { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" }, + { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" }, + { LLM_KV_GENERAL_NAME, "general.name" }, + { LLM_KV_GENERAL_AUTHOR, "general.author" }, + { LLM_KV_GENERAL_URL, "general.url" }, + { LLM_KV_GENERAL_DESCRIPTION, "general.description" }, + { LLM_KV_GENERAL_LICENSE, "general.license" }, + { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" }, + { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" }, { LLM_KV_CONTEXT_LENGTH, "%s.context_length" }, { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" }, From ac43576124a75c2de6e333ac31a3444ff9eb9458 Mon Sep 17 00:00:00 2001 From: Richard Roberson Date: Wed, 27 Sep 2023 10:25:12 -0600 Subject: [PATCH 23/26] make-ggml.py : compatibility with more models and GGUF (#3290) * Resync my fork with new llama.cpp commits * examples : rename to use dash instead of underscore * New model conversions --------- Co-authored-by: Georgi Gerganov --- examples/make-ggml.py | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/examples/make-ggml.py b/examples/make-ggml.py index 6a34eeac5..c73485ebf 100755 --- a/examples/make-ggml.py +++ b/examples/make-ggml.py @@ -1,22 +1,25 @@ #!/usr/bin/env python3 """ -This script converts Hugging Face llama models to GGML and quantizes them. +This script converts Hugging Face Llama, StarCoder, Falcon, Baichuan, and GPT-NeoX models to GGUF and quantizes them. Usage: -python make-ggml.py --model {model_dir_or_hf_repo_name} [--outname {output_name} (Optional)] [--outdir {output_directory} (Optional)] [--quants {quant_types} (Optional)] [--keep_fp16 (Optional)] +python make-ggml.py {model_dir_or_hf_repo_name} --model_type {model_type} [--outname {output_name} (Optional)] [--outdir {output_directory} (Optional)] [--quants {quant_types} (Optional)] [--keep_fp16 (Optional)] Arguments: -- --model: (Required) The directory of the downloaded Hugging Face model or the name of the Hugging Face model repository. If the model directory does not exist, it will be downloaded from the Hugging Face model hub. 
+- model: (Required) The directory of the downloaded Hugging Face model or the name of the Hugging Face model repository. If the model directory does not exist, it will be downloaded from the Hugging Face model hub. +- --model_type: (Required) The type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox. - --outname: (Optional) The name of the output model. If not specified, the last part of the model directory path or the Hugging Face model repo name will be used. - --outdir: (Optional) The directory where the output model(s) will be stored. If not specified, '../models/{outname}' will be used. - --quants: (Optional) The types of quantization to apply. This should be a space-separated list. The default is 'Q4_K_M Q5_K_S'. - --keep_fp16: (Optional) If specified, the FP16 model will not be deleted after the quantized models are created. -Quant types: +Old quant types (some base model types require these): - Q4_0: small, very high quality loss - legacy, prefer using Q3_K_M - Q4_1: small, substantial quality loss - legacy, prefer using Q3_K_L - Q5_0: medium, balanced quality - legacy, prefer using Q4_K_M - Q5_1: medium, low quality loss - legacy, prefer using Q5_K_M + +New quant types (recommended): - Q2_K: smallest, extreme quality loss - not recommended - Q3_K: alias for Q3_K_M - Q3_K_S: very small, very high quality loss @@ -40,9 +43,7 @@ import argparse import os from huggingface_hub import snapshot_download -def main(model, outname, outdir, quants, keep_fp16): - ggml_version = "v3" - +def main(model, model_type, outname, outdir, quants, keep_fp16): if not os.path.isdir(model): print(f"Model not found at {model}. Downloading...") try: @@ -63,17 +64,20 @@ def main(model, outname, outdir, quants, keep_fp16): print("Building llama.cpp") subprocess.run(f"cd .. && make quantize", shell=True, check=True) - fp16 = f"{outdir}/{outname}.ggml{ggml_version}.fp16.bin" + fp16 = f"{outdir}/{outname}.gguf.fp16.bin" - print(f"Making unquantised GGML at {fp16}") + print(f"Making unquantised GGUF at {fp16}") if not os.path.isfile(fp16): - subprocess.run(f"python3 ../convert.py {model} --outtype f16 --outfile {fp16}", shell=True, check=True) + if model_type != "llama": + subprocess.run(f"python3 ../convert-{model_type}-hf-to-gguf.py {model} 1 --outfile {fp16}", shell=True, check=True) + else: + subprocess.run(f"python3 ../convert.py {model} --outtype f16 --outfile {fp16}", shell=True, check=True) else: print(f"Unquantised GGML already exists at: {fp16}") print("Making quants") for type in quants: - outfile = f"{outdir}/{outname}.ggml{ggml_version}.{type}.bin" + outfile = f"{outdir}/{outname}.gguf.{type}.bin" print(f"Making {type} : {outfile}") subprocess.run(f"../quantize {fp16} {outfile} {type}", shell=True, check=True) @@ -81,8 +85,9 @@ def main(model, outname, outdir, quants, keep_fp16): os.remove(fp16) if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Convert/Quantize HF to GGML. If you have the HF model downloaded already, pass the path to the model dir. Otherwise, pass the Hugging Face model repo name. You need to be in the /examples folder for it to work.') - parser.add_argument('--model', required=True, help='Downloaded model dir or Hugging Face model repo name') + parser = argparse.ArgumentParser(description='Convert/Quantize HF models to GGUF. If you have the HF model downloaded already, pass the path to the model dir. Otherwise, pass the Hugging Face model repo name. 
You need to be in the /examples folder for it to work.') + parser.add_argument('model', help='Downloaded model dir or Hugging Face model repo name') + parser.add_argument('--model_type', required=True, choices=['llama', 'starcoder', 'falcon', 'baichuan', 'gptneox'], help='Type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox.') parser.add_argument('--outname', default=None, help='Output model(s) name') parser.add_argument('--outdir', default=None, help='Output directory') parser.add_argument('--quants', nargs='*', default=["Q4_K_M", "Q5_K_S"], help='Quant types') @@ -90,4 +95,4 @@ if __name__ == "__main__": args = parser.parse_args() - main(args.model, args.outname, args.outdir, args.quants, args.keep_fp16) + main(args.model, args.model_type, args.outname, args.outdir, args.quants, args.keep_fp16) From e519621010cac02c6fec0f8f3b16cda0591042c0 Mon Sep 17 00:00:00 2001 From: Zhang Peiyuan Date: Thu, 28 Sep 2023 02:45:20 +0800 Subject: [PATCH 24/26] convert : remove bug in convert.py permute function (#3364) --- convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert.py b/convert.py index 4ac5030db..8bb6c7e41 100755 --- a/convert.py +++ b/convert.py @@ -439,7 +439,7 @@ Vocab: TypeAlias = 'BpeVocab | SentencePieceVocab' def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray: #print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) ) if n_head_kv is not None and n_head != n_head_kv: - n_head //= n_head_kv + n_head = n_head_kv return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) .swapaxes(1, 2) .reshape(weights.shape)) From da0400344be12074e67dcabc565140289cf7efaa Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 28 Sep 2023 12:08:28 +0200 Subject: [PATCH 25/26] ggml-cuda : perform cublas fp16 matrix multiplication as fp16 (#3370) * ggml-cuda : perform cublas fp16 matrix multiplication as fp16 * try to fix rocm build * restrict fp16 mat mul to volta and up --- ggml-cuda.cu | 120 ++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 24 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 08428ea3f..79e2d313a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -14,9 +14,11 @@ // for rocblas_initialize() #include "rocblas/rocblas.h" #endif // __HIP_PLATFORM_AMD__ +#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F #define CUBLAS_COMPUTE_32F HIPBLAS_R_32F #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F #define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT +#define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT #define CUBLAS_OP_N HIPBLAS_OP_N #define CUBLAS_OP_T HIPBLAS_OP_T #define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS @@ -235,8 +237,12 @@ static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } +template +using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int k, cudaStream_t stream); +typedef to_t_cuda_t to_fp32_cuda_t; +typedef to_t_cuda_t to_fp16_cuda_t; + typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v); -typedef void (*to_fp32_cuda_t)(const void * __restrict__ x, float * __restrict__ y, int k, cudaStream_t stream); typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v); typedef void (*cpy_kernel_t)(const char * cx, 
char * cdst); typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); @@ -1515,6 +1521,14 @@ static __device__ void convert_f16(const void * vx, const int ib, const int iqs, v.y = x[ib + iqs + 1]; } +static __device__ void convert_f32(const void * vx, const int ib, const int iqs, dfloat2 & v){ + const float * x = (const float *) vx; + + // automatic half -> float type cast if dfloat == float + v.x = x[ib + iqs + 0]; + v.y = x[ib + iqs + 1]; +} + static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) { const int ix = blockDim.x*blockIdx.x + threadIdx.x; @@ -1554,8 +1568,8 @@ static __global__ void quantize_q8_1(const float * __restrict__ x, void * __rest reinterpret_cast(y[ib].ds.y) = sum; } -template -static __global__ void dequantize_block(const void * __restrict__ vx, float * __restrict__ y, const int k) { +template +static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { const int i = blockDim.x*blockIdx.x + 2*threadIdx.x; if (i >= k) { @@ -4826,6 +4840,11 @@ static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, c dequantize_block<1, 1, convert_f16><<>>(vx, y, k); } +static void convert_fp32_to_fp16_cuda(const void * vx, half * y, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE; + dequantize_block<1, 1, convert_f32><<>>(vx, y, k); +} + static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; @@ -4835,6 +4854,15 @@ static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, floa <<>>(vx, y, dst, ncols, nrows); } +static to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { + switch (type) { + case GGML_TYPE_F32: + return convert_fp32_to_fp16_cuda; + default: + return nullptr; + } +} + static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { switch (type) { case GGML_TYPE_Q4_0: @@ -6016,8 +6044,6 @@ inline void ggml_cuda_op_mul_mat_cublas( GGML_ASSERT(src1_ddf_i != nullptr); GGML_ASSERT(dst_dd_i != nullptr); - const float alpha = 1.0f; - const float beta = 0.0f; const int64_t ne00 = src0->ne[0]; @@ -6026,16 +6052,6 @@ inline void ggml_cuda_op_mul_mat_cublas( const int64_t ne0 = dst->ne[0]; const int64_t row_diff = row_high - row_low; - float * src0_ddq_as_f32; - size_t src0_as = 0; - - if (src0->type != GGML_TYPE_F32) { - const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); - src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT - to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); - } - const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32; - int id; CUDA_CHECK(cudaGetDevice(&id)); @@ -6043,16 +6059,72 @@ inline void ggml_cuda_op_mul_mat_cublas( // ldc == nrows of the matrix that cuBLAS writes into int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? 
ne0 : row_diff; - CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream)); - CUBLAS_CHECK( - cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, - row_diff, src1_ncols, ne10, - &alpha, src0_ddf_i, ne00, - src1_ddf_i, ne10, - &beta, dst_dd_i, ldc)); + const int compute_capability = g_compute_capabilities[id]; - if (src0_as > 0) { - ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); + if (compute_capability >= CC_TURING && src0->type == GGML_TYPE_F16 && ggml_is_contiguous(src0) && ldc == row_diff) { + // convert src1 to fp16, multiply as fp16, convert dst to fp32 + half * src1_as_f16 = nullptr; + size_t src1_as = 0; + if (src1->type != GGML_TYPE_F16) { + const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); + GGML_ASSERT(to_fp16_cuda != nullptr); + size_t ne = src1_ncols*ne10; + src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); + to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); + } + const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16; + + size_t dst_as = 0; + half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); + + const half alpha_f16 = 1.0f; + const half beta_f16 = 0.0f; + + CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream)); + CUBLAS_CHECK( + cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, + row_diff, src1_ncols, ne10, + &alpha_f16, src0_dd_i, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta_f16, dst_f16, CUDA_R_16F, ldc, + CUBLAS_COMPUTE_16F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP)); + + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); + to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream); + + ggml_cuda_pool_free(dst_f16, dst_as); + + if (src1_as != 0) { + ggml_cuda_pool_free(src1_as_f16, src1_as); + } + } + else { + float * src0_ddq_as_f32 = nullptr; + size_t src0_as = 0; + + if (src0->type != GGML_TYPE_F32) { + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); + GGML_ASSERT(to_fp32_cuda != nullptr); + src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT + to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); + } + const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32; + + const float alpha = 1.0f; + const float beta = 0.0f; + + CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream)); + CUBLAS_CHECK( + cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, + row_diff, src1_ncols, ne10, + &alpha, src0_ddf_i, ne00, + src1_ddf_i, ne10, + &beta, dst_dd_i, ldc)); + + if (src0_as != 0) { + ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); + } } (void) dst; From 4aea3b846ec151cc6d08f93a8889eae13b286b06 Mon Sep 17 00:00:00 2001 From: Pierre Alexandre SCHEMBRI Date: Thu, 28 Sep 2023 14:13:37 +0200 Subject: [PATCH 26/26] readme : add Mistral AI release 0.1 (#3362) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 09c5b1b92..9675ce1e7 100644 --- a/README.md +++ b/README.md @@ -92,6 +92,7 @@ as the main playground for developing new features for the [ggml](https://github - [X] [WizardLM](https://github.com/nlpxucan/WizardLM) - [X] [Baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B) and its derivations (such as [baichuan-7b-sft](https://huggingface.co/hiyouga/baichuan-7b-sft)) - [X] [Aquila-7B](https://huggingface.co/BAAI/Aquila-7B) / [AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B) +- [X] Mistral AI v0.1 **Bindings:**