cuda : fix LLAMA_CUDA_F16 build (#6298)
commit 2f34b865b6
parent ae1f211ce2
@@ -1,5 +1,6 @@
 #include "dmmv.cuh"
 #include "dequantize.cuh"
+#include "convert.cuh"
 
 // dmmv = dequantize_mul_mat_vec
 #ifndef GGML_CUDA_DMMV_X
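
Why a single missing include only surfaced in the LLAMA_CUDA_F16 build: building with LLAMA_CUDA_F16 defines GGML_CUDA_F16, which enables a half-precision path in the dequantize-mul-mat-vec code that relies on the conversion helpers declared in convert.cuh; the default fp32 build never compiles that path, so it built fine without the include. The sketch below is a minimal, hypothetical illustration of that mechanism, not llama.cpp source; the names to_fp16_sketch and convert_src1_sketch are invented for this example.

// Minimal sketch (hypothetical names) of code that only needs the
// conversion helper when GGML_CUDA_F16 is defined.
#include <cuda_runtime.h>
#include <cuda_fp16.h>

// Stand-in for an fp32 -> fp16 conversion helper of the kind convert.cuh declares.
__global__ void to_fp16_sketch(const float * x, half * y, const int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = __float2half(x[i]);
    }
}

// Stand-in for a dmmv host wrapper: the fp16 branch is compiled only when
// GGML_CUDA_F16 is defined, which is what LLAMA_CUDA_F16 turns on.
void convert_src1_sketch(const float * src1_f32, half * src1_f16,
                         const int ne00, cudaStream_t stream) {
#ifdef GGML_CUDA_F16
    // This launch is the first point that needs the helper's declaration in
    // scope, and it is only compiled in the F16 build -- so a missing
    // declaration breaks only that configuration.
    const int block_size = 256;
    const int num_blocks = (ne00 + block_size - 1) / block_size;
    to_fp16_sketch<<<num_blocks, block_size, 0, stream>>>(src1_f32, src1_f16, ne00);
#else
    // The default fp32 path never references the helper, so the plain CUDA
    // build compiles even without it.
    (void) src1_f32; (void) src1_f16; (void) ne00; (void) stream;
#endif // GGML_CUDA_F16
}

Adding the include unconditionally, as the one-line diff above does, keeps both build configurations compiling from the same translation unit.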