llama.cpp/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
Johannes Gäßler 7d1a378b8f
CUDA: refactor mmq, dmmv, mmvq (#7716)
* fix out-of-bounds write

* struct for qk, qr, qi

* fix cmake build

* mmq_type_traits
2024-06-05 16:53:00 +02:00

// This file has been autogenerated by generate_cu_files.py, do not edit manually.

#include "../fattn-wmma-f16.cuh"

DECL_FATTN_WMMA_F16_CASE(64, 16, float);
DECL_FATTN_WMMA_F16_CASE(80, 16, float);
DECL_FATTN_WMMA_F16_CASE(96, 16, float);
DECL_FATTN_WMMA_F16_CASE(112, 16, float);
DECL_FATTN_WMMA_F16_CASE(128, 16, float);
DECL_FATTN_WMMA_F16_CASE(256, 16, float);
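
This instance file covers the FP16 WMMA flash-attention kernel for head sizes 64 through 256, with 16 KQ columns per block and a float KQ accumulator; splitting the instantiations across autogenerated files like this lets the build compile them in parallel instead of in one large translation unit. Judging by the macro name and its three arguments, DECL_FATTN_WMMA_F16_CASE presumably wraps an explicit template instantiation roughly like the sketch below; the actual definition lives in fattn-wmma-f16.cuh, and the function name and signature shown here are assumptions, not a quote of that header.

// Hypothetical sketch of the macro behind the declarations above.
// D is the attention head size, cols_per_block the number of KQ columns
// processed per CUDA block, and KQ_acc_t the accumulator type (half or
// float) for the KQ matrix product. The macro body deliberately omits the
// trailing semicolon, which is supplied at each DECL_... call site.
#define DECL_FATTN_WMMA_F16_CASE(D, cols_per_block, KQ_acc_t)                          \
    template void ggml_cuda_flash_attn_ext_wmma_f16_case                               \
    <D, cols_per_block, KQ_acc_t>(ggml_backend_cuda_context & ctx, ggml_tensor * dst)

Under that reading, each .cu instance file emits explicit instantiation definitions for only the cases it lists, so nvcc compiles each (head size, cols_per_block, accumulator type) combination exactly once, keeping per-file compile time and memory bounded.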