Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-29 07:34:18 +01:00)
f3f65429c4
* scripts : update sync [no ci]
* files : relocate [no ci]
* ci : disable kompute build [no ci]
* cmake : fixes [no ci]
* server : fix mingw build

ggml-ci

* cmake : minor [no ci]
* cmake : link math library [no ci]
* cmake : build normal ggml library (not object library) [no ci]
* cmake : fix kompute build

ggml-ci

* make,cmake : fix LLAMA_CUDA + replace GGML_CDEF_PRIVATE

ggml-ci

* move public backend headers to the public include directory (#8122)
* move public backend headers to the public include directory
* nix test
* spm : fix metal header

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

* scripts : fix sync paths [no ci]
* scripts : sync ggml-blas.h [no ci]

---------

Co-authored-by: slaren <slarengh@gmail.com>
11 lines
367 B
Plaintext
// This file has been autogenerated by generate_cu_files.py, do not edit manually.

#include "../fattn-wmma-f16.cuh"

DECL_FATTN_WMMA_F16_CASE(64, 16, float);
DECL_FATTN_WMMA_F16_CASE(80, 16, float);
DECL_FATTN_WMMA_F16_CASE(96, 16, float);
DECL_FATTN_WMMA_F16_CASE(112, 16, float);
DECL_FATTN_WMMA_F16_CASE(128, 16, float);
DECL_FATTN_WMMA_F16_CASE(256, 16, float);
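For context: generate_cu_files.py emits one such file per parameter batch so that the explicit template instantiations are spread across many small translation units and can be compiled in parallel. Below is a minimal sketch of that pattern in plain C++; the names fattn_case and DECL_FATTN_CASE are hypothetical stand-ins, not the actual DECL_FATTN_WMMA_F16_CASE machinery defined in fattn-wmma-f16.cuh.

// Illustrative sketch only; the names below are not the real llama.cpp definitions.
#include <cstdio>

// A shared header defines the template once. In llama.cpp the body would
// launch a flash-attention kernel specialized for these compile-time
// parameters (head size, columns per block, KQ accumulator type).
template <int head_size, int cols_per_block, typename KQ_acc_t>
void fattn_case() {
    std::printf("head_size=%d cols_per_block=%d acc_bytes=%zu\n",
                head_size, cols_per_block, sizeof(KQ_acc_t));
}

// A macro turns one line into an explicit instantiation, forcing code
// generation in whichever translation unit invokes it:
#define DECL_FATTN_CASE(D, cols, acc_t) \
    template void fattn_case<D, cols, acc_t>()

// An autogenerated file then pins down one batch of combinations, mirroring
// the DECL_FATTN_WMMA_F16_CASE(64, 16, float); lines above:
DECL_FATTN_CASE(64, 16, float);
DECL_FATTN_CASE(128, 16, float);

int main() {
    fattn_case<64, 16, float>();   // resolved by the instantiation above
    fattn_case<128, 16, float>();
    return 0;
}

Splitting instantiations this way is a common build-time optimization: each generated .cu file is small and independent, so the head-size variants (64, 80, 96, 112, 128, 256) can be compiled concurrently instead of inside one monolithic translation unit.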