quantize: be able to specify the token embedding tensor type

Iwan Kawrakow 2024-03-22 16:27:34 +02:00
parent 7883796f71
commit 0e826d12a5
3 changed files with 28 additions and 17 deletions

examples/quantize/quantize.cpp

@@ -221,6 +221,12 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--token-embedding-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.token_embedding_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
             params.allow_requantize = true;
         } else if (strcmp(argv[arg_idx], "--pure") == 0) {
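The new branch reuses parse_ggml_type(), a helper defined elsewhere in quantize.cpp that this diff does not show. A minimal sketch of such a parser, assuming only ggml_type_name() from ggml.h, might look like the following; returning GGML_TYPE_COUNT for unrecognized names is what lets the quantization code below treat GGML_TYPE_COUNT as "not set":

#include <string.h>
#include "ggml.h"

// Map a type name such as "q8_0" or "f16" to the corresponding ggml_type.
// Returns GGML_TYPE_COUNT when the name is not recognized, so callers can
// keep using GGML_TYPE_COUNT as an "unset" sentinel. This is a sketch; the
// actual helper in quantize.cpp may differ in details.
static enum ggml_type parse_ggml_type(const char * arg) {
    for (int j = 0; j < GGML_TYPE_COUNT; ++j) {
        const char * name = ggml_type_name((enum ggml_type) j);
        if (name && strcmp(arg, name) == 0) {
            return (enum ggml_type) j;
        }
    }
    return GGML_TYPE_COUNT;
}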

llama.cpp

@@ -11987,15 +11987,18 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             }
         }
     } else if (name == "token_embd.weight") {
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
-            ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
-            new_type = GGML_TYPE_Q2_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
-            new_type = GGML_TYPE_IQ3_S;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = GGML_TYPE_IQ3_S;
-        }
+        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
+            new_type = qs.params->token_embedding_type;
+        } else {
+            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
+                new_type = GGML_TYPE_Q2_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+        }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
                ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
@@ -12892,6 +12895,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.nthread                     =*/ 0,
         /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
         /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
+        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
         /*.allow_requantize             =*/ false,
         /*.quantize_output_tensor       =*/ true,
         /*.only_copy                    =*/ false,

llama.h

@@ -275,14 +275,15 @@ extern "C" {

    // model quantization parameters
    typedef struct llama_model_quantize_params {
        int32_t nthread;                     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
        enum llama_ftype ftype;              // quantize to this llama_ftype
        enum ggml_type output_tensor_type;   // output tensor type
+       enum ggml_type token_embedding_type; // token embeddings tensor type
        bool allow_requantize;               // allow quantizing non-f32/f16 tensors
        bool quantize_output_tensor;         // quantize output.weight
        bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
        bool pure;                           // quantize all tensors to the default type
        void * imatrix;                      // pointer to importance matrix data
    } llama_model_quantize_params;

    // grammar types
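Taken together, these changes let a caller pin the token embedding tensor to a specific type while everything else is quantized as usual; leaving token_embedding_type at its default (GGML_TYPE_COUNT) falls back to the ftype-based choice in llama_tensor_get_type(). A hypothetical end-to-end use of the C API (the GGUF file names are placeholders, and the chosen types are just an illustration):

#include "llama.h"

int main(void) {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype                = LLAMA_FTYPE_MOSTLY_IQ2_XS; // 2-bit quantization overall...
    params.token_embedding_type = GGML_TYPE_Q8_0;            // ...but keep token_embd.weight at Q8_0
    // llama_model_quantize returns 0 on success.
    return llama_model_quantize("model-f16.gguf", "model-iq2_xs.gguf", &params) == 0 ? 0 : 1;
}

The same override is exposed on the command line through the new --token-embedding-type flag parsed in quantize.cpp above.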