llamafile : use 64-bit integers in sgemm (#6928)

This commit is contained in:
Justine Tunney 2024-04-26 10:05:33 -04:00 committed by GitHub
parent bbe3c6e761
commit 4b1c3c98b4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 87 additions and 89 deletions

170
sgemm.cpp
View File

@@ -50,7 +50,6 @@
#pragma GCC diagnostic ignored "-Wignored-attributes" #pragma GCC diagnostic ignored "-Wignored-attributes"
#include "sgemm.h" #include "sgemm.h"
#include <algorithm>
#include "ggml-impl.h" #include "ggml-impl.h"
#include "ggml-quants.h" #include "ggml-quants.h"
@@ -243,23 +242,23 @@ template <> inline __m512 load(const ggml_fp16_t *p) {
template <int KN, typename D, typename V, typename TA, typename TB, typename TC> template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
class tinyBLAS { class tinyBLAS {
public: public:
tinyBLAS(int k, tinyBLAS(int64_t k,
const TA *A, int lda, const TA *A, int64_t lda,
const TB *B, int ldb, const TB *B, int64_t ldb,
TC *C, int ldc, TC *C, int64_t ldc,
int ith, int nth) int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
} }
void matmul(int m, int n, int task) { void matmul(int64_t m, int64_t n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE) if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n); mnpack(0, m, 0, n);
} }
private: private:
NOINLINE void mnpack(int m0, int m, int n0, int n) { NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int mc, nc, mp, np; int64_t mc, nc, mp, np;
switch ((std::min(m - m0, 5) << 4) | std::min(n - n0, 5)) { switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) {
#if VECTOR_REGISTERS == 32 #if VECTOR_REGISTERS == 32
case 0x55: case 0x55:
mc = 5; mc = 5;
@@ -409,27 +408,27 @@ class tinyBLAS {
} }
template <int RM, int RN> template <int RM, int RN>
NOINLINE void gemm(int m0, int m, int n0, int n) { NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int ytiles = (m - m0) / RM; int64_t ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN; int64_t xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles; int64_t tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth; int64_t duty = (tiles + nth - 1) / nth;
int start = duty * ith; int64_t start = duty * ith;
int end = start + duty; int64_t end = start + duty;
if (end > tiles) if (end > tiles)
end = tiles; end = tiles;
for (int job = start; job < end; ++job) { for (int64_t job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM; int64_t ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN; int64_t jj = n0 + job % xtiles * RN;
D Cv[RN][RM] = {}; D Cv[RN][RM] = {};
for (int l = 0; l < k; l += KN) for (int64_t l = 0; l < k; l += KN)
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l), Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l),
load<V>(B + ldb * (jj + j) + l), load<V>(B + ldb * (jj + j) + l),
Cv[j][i]); Cv[j][i]);
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
} }
} }
@@ -437,10 +436,10 @@ class tinyBLAS {
const TA *const A; const TA *const A;
const TB *const B; const TB *const B;
TC *const C; TC *const C;
const int k; const int64_t k;
const int lda; const int64_t lda;
const int ldb; const int64_t ldb;
const int ldc; const int64_t ldc;
const int ith; const int ith;
const int nth; const int nth;
}; };
@@ -452,23 +451,23 @@ class tinyBLAS {
template <typename TA> template <typename TA>
class tinyBLAS_Q0_ARM { class tinyBLAS_Q0_ARM {
public: public:
tinyBLAS_Q0_ARM(int k, tinyBLAS_Q0_ARM(int64_t k,
const TA *A, int lda, const TA *A, int64_t lda,
const block_q8_0 *B, int ldb, const block_q8_0 *B, int64_t ldb,
float *C, int ldc, float *C, int64_t ldc,
int ith, int nth) int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
} }
void matmul(int m, int n, int task) { void matmul(int64_t m, int64_t n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE) if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n); mnpack(0, m, 0, n);
} }
private: private:
NOINLINE void mnpack(int m0, int m, int n0, int n) { NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int mc, nc, mp, np; int64_t mc, nc, mp, np;
switch ((std::min(m - m0, 3) << 4) | std::min(n - n0, 3)) { switch ((MIN(m - m0, 3) << 4) | MIN(n - n0, 3ll)) {
case 0x33: case 0x33:
mc = 3; mc = 3;
nc = 3; nc = 3;
@@ -524,22 +523,22 @@ class tinyBLAS_Q0_ARM {
} }
template <int RM, int RN> template <int RM, int RN>
NOINLINE void gemm(int m0, int m, int n0, int n) { NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int ytiles = (m - m0) / RM; int64_t ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN; int64_t xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles; int64_t tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth; int64_t duty = (tiles + nth - 1) / nth;
int start = duty * ith; int64_t start = duty * ith;
int end = start + duty; int64_t end = start + duty;
if (end > tiles) if (end > tiles)
end = tiles; end = tiles;
for (int job = start; job < end; ++job) { for (int64_t job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM; int64_t ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN; int64_t jj = n0 + job % xtiles * RN;
float32x4_t Cv[RN][RM] = {}; float32x4_t Cv[RN][RM] = {};
for (int l = 0; l < k; ++l) for (int64_t l = 0; l < k; ++l)
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
Cv[j][i] = vmlaq_n_f32(Cv[j][i], Cv[j][i] = vmlaq_n_f32(Cv[j][i],
vcvtq_f32_s32(vdotq_s32( vcvtq_f32_s32(vdotq_s32(
vdotq_s32(vdupq_n_s32(0), vdotq_s32(vdupq_n_s32(0),
@@ -549,8 +548,8 @@ class tinyBLAS_Q0_ARM {
load_hi(B + ldb * (jj + j) + l))), load_hi(B + ldb * (jj + j) + l))),
unhalf(A[lda * (ii + i) + l].d) * unhalf(A[lda * (ii + i) + l].d) *
unhalf(B[ldb * (jj + j) + l].d)); unhalf(B[ldb * (jj + j) + l].d));
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
} }
} }
@@ -577,10 +576,10 @@ class tinyBLAS_Q0_ARM {
const TA *const A; const TA *const A;
const block_q8_0 *const B; const block_q8_0 *const B;
float *const C; float *const C;
const int k; const int64_t k;
const int lda; const int64_t lda;
const int ldb; const int64_t ldb;
const int ldc; const int64_t ldc;
const int ith; const int ith;
const int nth; const int nth;
}; };
@@ -590,23 +589,23 @@ class tinyBLAS_Q0_ARM {
template <typename TA, typename TB, typename TC> template <typename TA, typename TB, typename TC>
class tinyBLAS_Q0_AVX2 { class tinyBLAS_Q0_AVX2 {
public: public:
tinyBLAS_Q0_AVX2(int k, tinyBLAS_Q0_AVX2(int64_t k,
const TA *A, int lda, const TA *A, int64_t lda,
const TB *B, int ldb, const TB *B, int64_t ldb,
TC *C, int ldc, TC *C, int64_t ldc,
int ith, int nth) int ith, int nth)
: A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
} }
void matmul(int m, int n, int task) { void matmul(int64_t m, int64_t n, int task) {
if (task == GGML_TASK_TYPE_COMPUTE) if (task == GGML_TASK_TYPE_COMPUTE)
mnpack(0, m, 0, n); mnpack(0, m, 0, n);
} }
private: private:
void mnpack(int m0, int m, int n0, int n) { void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int mc, nc, mp, np; int64_t mc, nc, mp, np;
switch ((std::min(m - m0, 4) << 4) | std::min(n - n0, 4)) { switch ((MIN(m - m0, 4) << 4) | MIN(n - n0, 4)) {
#if VECTOR_REGISTERS == 32 #if VECTOR_REGISTERS == 32
case 0x44: case 0x44:
mc = 4; mc = 4;
@@ -714,22 +713,22 @@ class tinyBLAS_Q0_AVX2 {
} }
template <int RM, int RN> template <int RM, int RN>
NOINLINE void gemm(int m0, int m, int n0, int n) { NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
int ytiles = (m - m0) / RM; int64_t ytiles = (m - m0) / RM;
int xtiles = (n - n0) / RN; int64_t xtiles = (n - n0) / RN;
int tiles = xtiles * ytiles; int64_t tiles = xtiles * ytiles;
int duty = (tiles + nth - 1) / nth; int64_t duty = (tiles + nth - 1) / nth;
int start = duty * ith; int64_t start = duty * ith;
int end = start + duty; int64_t end = start + duty;
if (end > tiles) if (end > tiles)
end = tiles; end = tiles;
for (int job = start; job < end; ++job) { for (int64_t job = start; job < end; ++job) {
int ii = m0 + job / xtiles * RM; int64_t ii = m0 + job / xtiles * RM;
int jj = n0 + job % xtiles * RN; int64_t jj = n0 + job % xtiles * RN;
__m256 Cv[RN][RM] = {}; __m256 Cv[RN][RM] = {};
for (int l = 0; l < k; ++l) for (int64_t l = 0; l < k; ++l)
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) * Cv[j][i] = madd(_mm256_set1_ps(unhalf(A[lda * (ii + i) + l].d) *
unhalf(B[ldb * (jj + j) + l].d)), unhalf(B[ldb * (jj + j) + l].d)),
updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l), updot(_mm256_sign_epi8(load(A + lda * (ii + i) + l),
@@ -737,8 +736,8 @@ class tinyBLAS_Q0_AVX2 {
_mm256_sign_epi8(load(B + ldb * (jj + j) + l), _mm256_sign_epi8(load(B + ldb * (jj + j) + l),
load(A + lda * (ii + i) + l))), load(A + lda * (ii + i) + l))),
Cv[j][i]); Cv[j][i]);
for (int j = 0; j < RN; ++j) for (int64_t j = 0; j < RN; ++j)
for (int i = 0; i < RM; ++i) for (int64_t i = 0; i < RM; ++i)
C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]); C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
} }
} }
@@ -771,10 +770,10 @@ class tinyBLAS_Q0_AVX2 {
const TA *const A; const TA *const A;
const TB *const B; const TB *const B;
TC *const C; TC *const C;
const int k; const int64_t k;
const int lda; const int64_t lda;
const int ldb; const int64_t ldb;
const int ldc; const int64_t ldc;
const int ith; const int ith;
const int nth; const int nth;
}; };
@@ -813,8 +812,8 @@ class tinyBLAS_Q0_AVX2 {
* @param Ctype is GGML data type of `C` * @param Ctype is GGML data type of `C`
* @return true if this function was able to service the matmul request * @return true if this function was able to service the matmul request
*/ */
bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B, int ldb, void *C, bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
int ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) { int64_t ldc, int ith, int nth, int task, int Atype, int Btype, int Ctype) {
assert(m >= 0); assert(m >= 0);
assert(n >= 0); assert(n >= 0);
@@ -824,9 +823,6 @@ bool llamafile_sgemm(int m, int n, int k, const void *A, int lda, const void *B,
assert(ldc >= m); assert(ldc >= m);
assert(nth > 0); assert(nth > 0);
assert(ith < nth); assert(ith < nth);
assert(1ll * lda * m <= 0x7fffffff);
assert(1ll * ldb * n <= 0x7fffffff);
assert(1ll * ldc * n <= 0x7fffffff);
if (Ctype != GGML_TYPE_F32) if (Ctype != GGML_TYPE_F32)
return false; return false;

View File

@@ -1,11 +1,13 @@
#pragma once #pragma once
#include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
bool llamafile_sgemm(int, int, int, const void *, int, const void *, int, bool llamafile_sgemm(int64_t, int64_t, int64_t, const void *, int64_t,
void *, int, int, int, int, int, int, int); const void *, int64_t, void *, int64_t, int, int,
int, int, int, int);
#ifdef __cplusplus #ifdef __cplusplus
} }