clip : use ggml_backend_buffer_is_host (#4205)
commit 0235b9b571
parent ce18d727a4
@@ -598,11 +598,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             return nullptr;
         }
         int num_bytes = ggml_nbytes(cur);
-        if (ggml_backend_is_cpu(new_clip->backend)
-#ifdef GGML_USE_METAL
-            || ggml_backend_is_metal(new_clip->backend)
-#endif
-            ) {
+        if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
             // for the CPU and Metal backend, we can read directly into the tensor
             fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
         } else {
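The point of the change: instead of hard-coding which backends expose host-addressable memory (CPU, plus Metal behind an #ifdef), the loader asks the buffer itself via ggml_backend_buffer_is_host, so any host-mapped buffer takes the direct-read path. Below is a minimal sketch of that pattern, not the actual clip.cpp loader; the helper name load_tensor_data and the parameters fin and buf are illustrative (in clip.cpp the buffer is new_clip->params_buffer).

    #include <fstream>
    #include <vector>
    #include "ggml.h"
    #include "ggml-backend.h"

    static void load_tensor_data(std::ifstream & fin, ggml_tensor * cur, ggml_backend_buffer_t buf) {
        const size_t num_bytes = ggml_nbytes(cur);
        if (ggml_backend_buffer_is_host(buf)) {
            // the buffer's memory is host-addressable (CPU, Metal, ...):
            // read from the file directly into the tensor's data pointer
            fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
        } else {
            // device-only memory: stage through a temporary host buffer,
            // then copy into the tensor via the backend interface
            std::vector<char> tmp(num_bytes);
            fin.read(tmp.data(), num_bytes);
            ggml_backend_tensor_set(cur, tmp.data(), 0, num_bytes);
        }
    }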