// mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-31 06:03:11 +01:00)
// file: ggml-cuda/cpy.cuh
#pragma once

#include "common.cuh"

// Thread-block size used when launching the copy kernels declared below.
// NOTE(review): presumably matched by the kernel implementations in cpy.cu — confirm there.
#define CUDA_CPY_BLOCK_SIZE 32

// Copy the contents of tensor src0 into tensor src1 on the GPU.
// NOTE(review): likely also performs dtype conversion when src0/src1 types
// differ (ggml copy semantics) — confirm against the implementation in cpy.cu.
//
// ctx  - CUDA backend context (provides the stream the copy is enqueued on).
// src0 - source tensor (read-only).
// src1 - destination tensor (written).
void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);

// GGML_OP_DUP entry point: duplicate dst's source tensor into dst.
// NOTE(review): presumably a thin wrapper forwarding dst->src[0] -> dst to
// ggml_cuda_cpy — confirm in cpy.cu.
void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);