mirror of https://github.com/ggerganov/llama.cpp.git
clip : use ggml_backend_buffer_is_host (#4205)
commit 0235b9b571
parent ce18d727a4
@@ -598,11 +598,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
                 return nullptr;
             }
             int num_bytes = ggml_nbytes(cur);
-            if (ggml_backend_is_cpu(new_clip->backend)
-#ifdef GGML_USE_METAL
-                || ggml_backend_is_metal(new_clip->backend)
-#endif
-            ) {
+            if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
                 // for the CPU and Metal backend, we can read directly into the tensor
                 fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
             } else {
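For readers skimming the change, here is a minimal, self-contained sketch of the pattern the commit adopts. The helper name load_tensor_data and the fin/read_buf parameters are hypothetical stand-ins for the surrounding clip_model_load code; ggml_backend_buffer_is_host, ggml_nbytes, and ggml_backend_tensor_set are real ggml-backend calls. The idea is to ask the buffer at runtime whether its memory is host-addressable, rather than hard-coding which backends (CPU, plus Metal under #ifdef GGML_USE_METAL) happen to use host memory.

    #include <cstdint>
    #include <fstream>
    #include <vector>

    #include "ggml.h"
    #include "ggml-backend.h"

    // Hypothetical helper, not part of the commit: load one tensor's payload
    // from an open model file into wherever its backend buffer lives.
    static void load_tensor_data(std::ifstream & fin,
                                 struct ggml_tensor * cur,
                                 ggml_backend_buffer_t params_buffer,
                                 std::vector<uint8_t> & read_buf) {
        const size_t num_bytes = ggml_nbytes(cur);
        if (ggml_backend_buffer_is_host(params_buffer)) {
            // host-addressable buffer (CPU, Metal, ...): read straight into the tensor
            fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
        } else {
            // device-only buffer (e.g. CUDA): stage in host memory, then upload
            read_buf.resize(num_bytes);
            fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
            ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
        }
    }

With the runtime query, the #ifdef and the per-backend is_cpu/is_metal checks disappear, and any future backend that exposes host-visible buffers is handled automatically.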