From 42f8fe19272554c2aafe1be5ab2366d0e136ce3c Mon Sep 17 00:00:00 2001
From: M. Yusuf Sarıgöz
Date: Thu, 17 Aug 2023 08:56:42 +0300
Subject: [PATCH] examples/gguf : no need to keep q option for quantization any more

---
 examples/gguf/gguf.cpp | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index d742dce17..dee00df87 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -233,16 +233,13 @@ int main(int argc, char ** argv) {
     const std::string fname(argv[1]);
     const std::string mode (argv[2]);
 
-    GGML_ASSERT((mode == "r" || mode == "w" || mode == "q") && "mode must be r, w or q");
+    GGML_ASSERT((mode == "r" || mode == "w") && "mode must be r or w");
 
     if (mode == "w") {
         GGML_ASSERT(gguf_ex_write(fname) && "failed to write gguf file");
     } else if (mode == "r") {
         GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file");
         GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file");
-    } else if (mode == "q") {
-        llama_model_quantize_params params = llama_model_quantize_default_params();
-        llama_model_quantize(fname.c_str(), "quant.gguf", &params);
     }
 
     return 0;
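
Note: quantization itself is not lost with this patch; it remains reachable through the same llama API that the removed "q" branch called, presumably via the dedicated quantize example elsewhere in the tree. A minimal standalone sketch of that call path, assuming the llama.h of this era (the file names input.gguf and quant.gguf are illustrative, not mandated by the patch):

    #include "llama.h"

    int main() {
        // Start from the library's default quantization settings
        // (quantization type, thread count, and so on).
        llama_model_quantize_params params = llama_model_quantize_default_params();

        // Quantize input.gguf into quant.gguf with those defaults;
        // returns 0 on success, non-zero on failure.
        return llama_model_quantize("input.gguf", "quant.gguf", &params);
    }

This mirrors the three removed lines, which is why the gguf example no longer needs its own "q" mode.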