mirror of https://github.com/ggerganov/llama.cpp.git
cuda : add comment
parent dd86df82e6
commit 4adb1d69d9
@@ -7849,6 +7849,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
 #endif // GGML_CUDA_FORCE_DMMV
 
     if (use_mul_mat_vec_q) {
+        // NOTE: this kernel does not support ggml_nrows(src1) > 1
         ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, true);
     } else {
         ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
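
The added NOTE documents that the specialized quantized mat-vec kernel only handles a single row of src1; batched (multi-row) src1 has to go through the dequantize fallback. Below is a minimal, self-contained C sketch of that kind of dispatch, for illustration only: the struct and helper names (tensor, mat_vec_quantized, dequantize_then_mat_mul, mul_mat) are hypothetical stand-ins rather than ggml's API, and the explicit rows == 1 guard is an assumption about how a caller would respect the limitation that the comment in the diff merely documents.

#include <stdio.h>

/* Hypothetical stand-ins for illustration only; not ggml's API. */
typedef struct { int rows; int cols; } tensor;

static void mat_vec_quantized(const tensor *a, const tensor *b) {
    (void)a; (void)b;
    printf("fast quantized mat-vec kernel (single-row src1)\n");
}

static void dequantize_then_mat_mul(const tensor *a, const tensor *b) {
    (void)a; (void)b;
    printf("dequantize + mat-mul fallback (handles any number of rows)\n");
}

/* Dispatch mirroring the pattern in the diff: the matrix-vector kernel
 * is only valid when src1 has a single row, which is what the added
 * NOTE points out. The rows == 1 guard here is an assumption made for
 * this sketch; the actual commit only adds the documenting comment. */
static void mul_mat(const tensor *src0, const tensor *src1, int use_mul_mat_vec_q) {
    if (use_mul_mat_vec_q && src1->rows == 1) {
        mat_vec_quantized(src0, src1);
    } else {
        dequantize_then_mat_mul(src0, src1);
    }
}

int main(void) {
    tensor w        = { 4096, 4096 };
    tensor x_single = { 1, 4096 };   /* one row: mat-vec kernel applies */
    tensor x_batch  = { 8, 4096 };   /* batched rows: must fall back    */

    mul_mat(&w, &x_single, /*use_mul_mat_vec_q=*/1);
    mul_mat(&w, &x_batch,  /*use_mul_mat_vec_q=*/1);
    return 0;
}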