Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-28 15:18:26 +01:00
ae8de6d50a
* ggml : build backends as libraries --------- Signed-off-by: Xiaodong Ye <xiaodong.ye@mthreads.com> Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: R0CKSTAR <xiaodong.ye@mthreads.com>
13 lines
250 B
C++
#include "ggml-threading.h"
#include <mutex>
// Global mutex backing the ggml critical-section API below.
// Lifetime is the whole program; guarded regions are delimited manually
// via ggml_critical_section_start()/ggml_critical_section_end() because
// the callers use a C-style API and cannot hold a C++ RAII lock object.
std::mutex ggml_critical_section_mutex;
// Enter the global ggml critical section; blocks until the mutex is
// available. Must be paired with a matching ggml_critical_section_end().
// Not reentrant: std::mutex is non-recursive, so calling this twice from
// the same thread without an intervening end() is undefined behavior.
void ggml_critical_section_start() {
    ggml_critical_section_mutex.lock();
}
// Leave the global ggml critical section. Must only be called by the
// thread that currently holds the lock via ggml_critical_section_start().
// Note: `(void)` normalized to `()` for consistency with
// ggml_critical_section_start(); the two spellings are identical in C++.
void ggml_critical_section_end() {
    ggml_critical_section_mutex.unlock();
}