// Source: flask-llama / llama.cpp / ggml / src / ggml-threading.cpp
// Hugging Face upload metadata: YZ-TAN, "Upload 2821 files",
// commit 5a29263 (verified), raw / history / blame, 262 Bytes
#include "ggml-threading.h"
#include <mutex>
// Global mutex backing the ggml critical-section API defined below.
// NOTE(review): this has external linkage; the header appears to declare
// only the two functions, so if no other TU references the mutex directly
// it could be given internal linkage (static / anonymous namespace) —
// confirm against ggml-threading.h before changing.
std::mutex ggml_critical_section_mutex;
// Acquire the global ggml critical-section lock.
// Blocks until the lock becomes available; every call must be paired with
// a later ggml_critical_section_end() on the same thread.
// RAII (std::lock_guard/std::scoped_lock) is deliberately not used here:
// the lock must stay held past the function return, until the matching
// ggml_critical_section_end() call.
// Fix: use an explicit (void) parameter list for consistency with
// ggml_critical_section_end below (identical semantics in C++).
void ggml_critical_section_start(void) {
    ggml_critical_section_mutex.lock();
}
// Release the global ggml critical-section lock previously acquired by
// ggml_critical_section_start(). Calling this on a thread that does not
// hold the lock is undefined behavior (std::mutex::unlock precondition).
void ggml_critical_section_end(void) {
ggml_critical_section_mutex.unlock();
}