mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 06:10:29 +01:00
7c7836d9d4
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory * Improve debug log code * Add memory debug output option * Fix flake8 * Fix unnecessary high llama-3 VRAM use
30 lines
592 B
Plaintext
30 lines
592 B
Plaintext
#version 450

#extension GL_EXT_control_flow_attributes : enable

// Split-k reduction: one invocation per output element, 256 per workgroup.
layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

// Input: k_num partial-result slices of ne floats each, laid out back to back.
layout (binding = 0) readonly buffer A {float data_a[];};
// Output: one reduced slice of ne floats.
layout (binding = 1) writeonly buffer D {float data_d[];};

layout (push_constant) uniform parameter {
    uint ne;     // elements per slice (and number of output elements)
    uint k_num;  // number of partial slices to sum
} p;

// Computes data_d[idx] = sum over i in [0, k_num) of data_a[i * ne + idx].
void main() {
    const uint idx = gl_GlobalInvocationID.x;

    // Guard: the dispatch is rounded up to a whole number of workgroups,
    // so trailing invocations past ne must do nothing.
    if (idx >= p.ne) {
        return;
    }

    float result = 0.0f;

    // Stride by ne so each pass reads the same element from the next slice.
    [[unroll]] for (uint i = 0; i < p.k_num; i++) {
        result += data_a[i * p.ne + idx];
    }

    data_d[idx] = result;
}