#include "common.cuh"

// Computes the GGML_OP_FLASH_ATTN_EXT (flash attention) operator on the CUDA
// backend, writing the result into dst (inputs are taken from dst->src[]).
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
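
For context, a minimal, hypothetical sketch of how this entry point might be wired into the CUDA backend's op dispatch. The helper function below (ggml_cuda_try_flash_attn) and its structure are assumptions for illustration only and are not quoted from the repository; the actual routing happens in the backend's op switch.

#include "common.cuh"
#include "fattn.cuh"

// Hypothetical helper (not from the repository): route a graph node to the
// flash-attention entry point declared in fattn.cuh, assuming the usual ggml
// convention that a node's operands live in dst->src[].
static bool ggml_cuda_try_flash_attn(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    if (dst->op != GGML_OP_FLASH_ATTN_EXT) {
        return false; // not a flash-attention node, leave it to other ops
    }
    ggml_cuda_flash_attn_ext(ctx, dst);
    return true;
}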