#include "common.cuh"

// Launch the tile-based F16 FlashAttention (GGML_OP_FLASH_ATTN_EXT) kernel, writing the result into dst.
void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
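
// Usage sketch (illustrative only, not part of llama.cpp): a caller forwarding a
// GGML_OP_FLASH_ATTN_EXT node to this tile-based F16 kernel. dst is the op's output
// tensor; its src[] entries carry Q, K, V and the optional mask. The header name
// "fattn-tile-f16.cuh" and the helper function below are assumptions made for the example.
//
//     #include "common.cuh"
//     #include "fattn-tile-f16.cuh"
//
//     static void launch_flash_attn_tile_f16_example(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
//         ggml_cuda_flash_attn_ext_tile_f16(ctx, dst);
//     }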