mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 13:58:46 +01:00
fbf1ddec69
Signed-off-by: Jared Van Bortel <jared@nomic.ai> Co-authored-by: niansa <anton-sa@web.de> Co-authored-by: Adam Treat <treat.adam@gmail.com> Co-authored-by: Aaron Miller <apage43@ninjawhale.com> Co-authored-by: ToKiNoBug <tokinobug@163.com> Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: slaren <slarengh@gmail.com>
23 lines
604 B
Plaintext
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

// Source and destination tensors; element offsets arrive via push constants.
layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;   // element offset into in_
    uint outOff;  // element offset into out_
} pcs;

void main() {
    // Each workgroup processes a contiguous run of 8 elements.
    const uint first = gl_WorkGroupID.x * 8;

    for (uint k = 0; k < 8; ++k) {
        const uint idx = first + k;
        const float y = in_[idx + pcs.inOff];
        // tanh-approximation GELU; the tanh argument is clamped to [-15, 15]
        // to stay numerically safe. SQRT_2_OVER_PI and GELU_COEF_A are
        // presumably defined in common.comp (included above).
        out_[idx + pcs.outOff] = 0.5*y*(1.0 + tanh(clamp(SQRT_2_OVER_PI*y*(1.0 + GELU_COEF_A*y*y), -15.0, 15.0)));
    }
}
|