Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-27 06:39:25 +01:00
ggml : add and use ggml_cpu_has_llamafile() (#8664)
This commit is contained in:
parent be6d7c0791
commit eddcb5238b
ggml/include/ggml.h
@@ -2400,6 +2400,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_vsx        (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
     GGML_API int ggml_cpu_has_cann       (void);
+    GGML_API int ggml_cpu_has_llamafile  (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
ggml/src/ggml.c
@@ -22005,6 +22005,14 @@ int ggml_cpu_has_cann(void) {
 #endif
 }
 
+int ggml_cpu_has_llamafile(void) {
+#if defined(GGML_USE_LLAMAFILE)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
 }
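For context, a minimal sketch (not part of this commit) of how an application could probe the new flag through the public ggml API; it assumes ggml.h is on the include path and the program links against ggml:

    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        /* Each ggml_cpu_has_* query returns 1 if the corresponding
         * feature was compiled in, and 0 otherwise. */
        printf("LLAMAFILE = %d\n", ggml_cpu_has_llamafile());
        printf("GPUBLAS   = %d\n", ggml_cpu_has_gpublas());
        return 0;
    }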
src/llama.cpp
@@ -19146,11 +19146,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-#ifdef GGML_USE_LLAMAFILE
-    s += "LLAMAFILE = 1 | ";
-#else
-    s += "LLAMAFILE = 0 | ";
-#endif
+    s += "LLAMAFILE = "   + std::to_string(ggml_cpu_has_llamafile())   + " | ";
 
     return s.c_str();
 }
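With this change the LLAMAFILE flag is reported through the same ggml_cpu_has_* path as every other capability, so callers of llama_print_system_info() need nothing new. A minimal sketch of a caller (not part of this commit; assumes llama.h is on the include path):

    #include <stdio.h>
    #include "llama.h"

    int main(void) {
        /* The returned string now includes "LLAMAFILE = 0" or
         * "LLAMAFILE = 1" alongside the other feature flags. */
        printf("%s\n", llama_print_system_info());
        return 0;
    }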