mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 14:20:31 +01:00
Include IQ2_XXS and IQ2_XS in test-quantize-fns
This commit is contained in:
parent
43139cc528
commit
6f20e2672f
@@ -138,11 +138,6 @@ int main(int argc, char * argv[]) {
         const ggml_type ei = (ggml_type)i;
-        if (ei == GGML_TYPE_IQ2_XXS || ei == GGML_TYPE_IQ2_XS) {
-            printf("Skip %s due to missing quantization functionality\n", ggml_type_name(ei));
-            continue;
-        }
-
         printf("Testing %s\n", ggml_type_name((ggml_type) i));
         ggml_quantize_init(ei);

(NOTE: reconstructed from a garbled table extraction; the removed lines are the
IQ2_XXS/IQ2_XS skip block, consistent with the hunk header's 11 -> 6 line count
and the commit title — exact surrounding context lines should be confirmed
against the upstream commit.)
Loading…
Reference in New Issue
Block a user