mirror of https://github.com/ggerganov/llama.cpp.git
commit 4ff0831ce6 (parent b756441104)

metal : use F16 math in mul_mat kernels

ggml-ci
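In short: the kernel_mul_mm matrix-multiply kernel now keeps the src1 (B) tiles and, in this revision, the per-simdgroup accumulators in f16 rather than f32. The B tile is narrowed from f32 to f16 once, while being staged into threadgroup memory; the simdgroup matrices mb and mc become simdgroup_half8x8; and results are widened back to f32 only at store time. This halves the threadgroup memory and bandwidth spent on the B tile, at the cost of accumulating in half precision.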
@@ -2021,7 +2021,8 @@ static void ggml_metal_encode_node(
     [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2];
     [encoder setBuffer:id_dst offset:offs_dst atIndex:3];

     [encoder setThreadgroupMemoryLength:8192 atIndex:0];
+    //[encoder setThreadgroupMemoryLength:4096 + 2048 atIndex:0];
     [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
 } else {
     int nth0 = 32;
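The commented-out allocation records the smaller footprint this change enables. A worked size check, assuming the tile sizes this kernel appears to use (BLOCK_SIZE_M = 64, BLOCK_SIZE_N = 32, BLOCK_SIZE_K = 32, consistent with the (ne01 + 63)/64 and (ne11 + 31)/32 threadgroup grid above and the "smaller than 64x32" comment later in the diff):

    // sa (A tile): 64*32 = 2048 half elements * 2 B = 4096 B   (unchanged)
    // sb (B tile): 32*32 = 1024 elements, f32 -> f16: 4096 B -> 2048 B
    // total:       8192 B -> 6144 B, i.e. the commented 4096 + 2048
    //              (the 8192 B allocation is kept for now; the comment marks the new minimum)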
@@ -5439,8 +5439,8 @@ kernel void kernel_mul_mm(
     ushort tiitg[[thread_index_in_threadgroup]],
     ushort sgitg[[simdgroup_index_in_threadgroup]]) {

     threadgroup T * sa = (threadgroup T *)(shmem);
-    threadgroup float * sb = (threadgroup float *)(shmem + 4096);
+    threadgroup half * sb = (threadgroup half *)(shmem + 4096);

     const int r0 = tgpig.y;
     const int r1 = tgpig.x;
@@ -5454,12 +5454,12 @@ kernel void kernel_mul_mm(
     const short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
     const short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;

     simdgroup_T8x8 ma[4];
-    simdgroup_float8x8 mb[2];
-    simdgroup_float8x8 mc[8];
+    simdgroup_half8x8 mb[2];
+    simdgroup_half8x8 mc[8];

     for (short i = 0; i < 8; i++){
-        mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
+        mc[i] = make_filled_simdgroup_matrix<half, 8>(0.h);
     }

     short il = (tiitg % THREAD_PER_ROW);
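Two details in this hunk: 0.h is MSL's half-precision literal suffix (the analogue of 0.f), and accumulating in f16 narrows the arithmetic to roughly 11 significant bits with a maximum normal value of 65504. A one-line illustration of the precision trade-off (a micro-example, not from the diff):

    // above 1024 the spacing of representable halves is >= 1, so the 0.5 is lost:
    half h = half(1024.0f) + half(0.5f);   // rounds to 1024.0 under round-to-nearest-even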
@@ -5493,7 +5493,7 @@ kernel void kernel_mul_mm(
                 + (tiitg/THREAD_PER_ROW)%8 + (i&7)*8) = temp_a[i/4][i%4];
     }

-    *(threadgroup float2x4 *)(sb + 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL)) = *((device float2x4 *) y);
+    *(threadgroup half2x4 *)(sb + 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL)) = (half2x4)(*((device float2x4 *)y));

     il = (il + 2 < nl) ? il + 2 : il % 2;
     x = (il < 2) ? x + (2 + nl - 1)/nl : x;
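The staging copy still reads f32 from device memory and narrows to f16 with a single 8-wide vector cast as it writes into the threadgroup tile. Schematically (dst_off is a stand-in for the 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL) offset above; other names as in the hunk):

    // narrow 8 f32 values to f16 in one vector op while staging B into threadgroup memory
    *(threadgroup half2x4 *)(sb + dst_off) = (half2x4)(*((device float2x4 *) y));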
@@ -5502,8 +5502,8 @@ kernel void kernel_mul_mm(
     threadgroup_barrier(mem_flags::mem_threadgroup);

     // load matrices from threadgroup memory and conduct outer products
     threadgroup const T * lsma = (sa + THREAD_MAT_M*SG_MAT_SIZE*(sgitg%2));
-    threadgroup const float * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2));
+    threadgroup const half * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2));

     #pragma unroll(4)
     for (short ik = 0; ik < BLOCK_SIZE_K/8; ik++) {
@@ -5535,7 +5535,11 @@ kernel void kernel_mul_mm(
         (BLOCK_SIZE_N * r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0;

     for (short i = 0; i < 8; i++) {
-        simdgroup_store(mc[i], C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0);
+        // cast to f32
+        simdgroup_float8x8 mc_f32(1.0f);
+        simdgroup_multiply(mc_f32, mc[i], mc_f32);
+        simdgroup_store(mc_f32, C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0);
+        //simdgroup_store(mc[i], C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0);
     }
 } else {
     // block is smaller than 64x32, we should avoid writing data outside of the matrix
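simdgroup_store writes elements of the matrix's own type, so the f16 accumulator cannot be stored straight into the f32 destination. The new code converts through a multiply with an f32 identity matrix: the one-argument simdgroup_matrix constructor fills the diagonal, so mc_f32 starts out as I, and simdgroup_multiply(mc_f32, mc[i], mc_f32) evaluates mc[i] * I with the result held in f32. A standalone sketch of the idiom (hypothetical kernel for illustration; it assumes the same mixed-precision simdgroup_multiply overload the diff relies on):

    #include <metal_stdlib>
    using namespace metal;

    kernel void store_half_as_float(device float * dst [[buffer(0)]],
                                    device half  * src [[buffer(1)]]) {
        simdgroup_half8x8 acc;
        simdgroup_load(acc, src, 8);               // an f16 8x8 tile
        simdgroup_float8x8 acc_f32(1.0f);          // diagonal constructor: identity matrix
        simdgroup_multiply(acc_f32, acc, acc_f32); // acc * I, result materialized in f32
        simdgroup_store(acc_f32, dst, 8);          // now stores f32 elements
    }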
@@ -5543,7 +5547,10 @@ kernel void kernel_mul_mm(
     threadgroup float * temp_str = ((threadgroup float *) shmem) \
                                  + 32*(sgitg&1) + (16*(sgitg >> 1))*BLOCK_SIZE_M;
     for (short i = 0; i < 8; i++) {
-        simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*BLOCK_SIZE_M*(i/4), BLOCK_SIZE_M);
+        simdgroup_float8x8 mc_f32(1.0f);
+        simdgroup_multiply(mc_f32, mc[i], mc_f32);
+        simdgroup_store(mc_f32, temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
+        //simdgroup_store(mc[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
     }

     threadgroup_barrier(mem_flags::mem_threadgroup);
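In this bounds-checked path the accumulators are likewise widened to f32 with the same identity-multiply idiom, but first parked in the threadgroup buffer (temp_str keeps its float element type); after the barrier only the valid sub-block is copied out to device memory, which is why nothing else in the hunk needs to change.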