Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 14:20:31 +01:00)
Commit 95f57bb5d5:
* ggml : remove ggml_task_type and GGML_PERF
* check abort_callback on main thread only
* vulkan : remove usage of ggml_compute_params
* remove LLAMA_PERF
15 lines · 302 B · C
#pragma once

#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Performs an optimized matrix multiplication on the CPU.
 *
 * Computes a GEMM-style product of A and B into C. The concrete
 * semantics (row/column major, quantized element formats) live in the
 * implementation file, not visible here.
 *
 * NOTE(review): parameter names below follow the BLAS-style convention
 * used by the definition — m/n/k are matrix dimensions, lda/ldb/ldc are
 * leading dimensions (strides), ith/nth are this thread's index and the
 * total thread count, and the *type parameters are presumably ggml
 * element-type enum values for A, B, and C — confirm against sgemm.cpp.
 *
 * @return true on success, false if the inputs are unsupported
 *         (caller should fall back to a generic path — TODO confirm).
 */
bool llamafile_sgemm(int64_t m, int64_t n, int64_t k,
                     const void *A, int64_t lda,
                     const void *B, int64_t ldb,
                     void *C, int64_t ldc,
                     int ith, int nth,
                     int Atype, int Btype, int Ctype);

#ifdef __cplusplus
}
#endif