2023-10-30 18:19:15 +01:00
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include "ggml.h"
|
|
|
|
|
|
|
|
// GGML internal header
|
|
|
|
|
|
|
|
#include <assert.h>
|
2024-01-04 09:12:26 +01:00
|
|
|
#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
|
2023-10-30 18:19:15 +01:00
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <string.h> // memcpy
|
|
|
|
#include <math.h> // fabsf
|
|
|
|
|
2024-04-25 14:12:28 +02:00
|
|
|
#undef MIN
|
|
|
|
#undef MAX
|
|
|
|
|
|
|
|
#define MIN(a, b) ((a) < (b) ? (a) : (b))
|
|
|
|
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
|
|
|
|
2024-06-17 20:23:17 +02:00
|
|
|
#if defined(_MSC_VER)
|
2024-05-20 04:18:39 +02:00
|
|
|
|
|
|
|
#define m512bh(p) p
|
|
|
|
#define m512i(p) p
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define m512bh(p) (__m512bh)(p)
|
|
|
|
#define m512i(p) (__m512i)(p)
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2024-05-08 08:30:09 +02:00
|
|
|
/**
|
|
|
|
* Converts brain16 to float32.
|
|
|
|
*
|
|
|
|
* The bfloat16 floating point format has the following structure:
|
|
|
|
*
|
|
|
|
* ┌sign
|
|
|
|
* │
|
|
|
|
* │ ┌exponent
|
|
|
|
* │ │
|
|
|
|
* │ │ ┌mantissa
|
|
|
|
* │ │ │
|
|
|
|
* │┌──┴───┐┌─┴───┐
|
|
|
|
* 0b0000000000000000 brain16
|
|
|
|
*
|
|
|
|
* Since bf16 has the same number of exponent bits as a 32bit float,
|
|
|
|
* encoding and decoding numbers becomes relatively straightforward.
|
|
|
|
*
|
|
|
|
* ┌sign
|
|
|
|
* │
|
|
|
|
* │ ┌exponent
|
|
|
|
* │ │
|
|
|
|
* │ │ ┌mantissa
|
|
|
|
* │ │ │
|
|
|
|
* │┌──┴───┐┌─┴───────────────────┐
|
|
|
|
* 0b00000000000000000000000000000000 IEEE binary32
|
|
|
|
*
|
|
|
|
* For comparison, the standard fp16 format has fewer exponent bits.
|
|
|
|
*
|
|
|
|
* ┌sign
|
|
|
|
* │
|
|
|
|
* │ ┌exponent
|
|
|
|
* │ │
|
|
|
|
* │ │ ┌mantissa
|
|
|
|
* │ │ │
|
|
|
|
* │┌─┴─┐┌─┴──────┐
|
|
|
|
* 0b0000000000000000 IEEE binary16
|
|
|
|
*
|
|
|
|
* @see IEEE 754-2008
|
|
|
|
*/
|
|
|
|
static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
    // bf16 shares the binary32 exponent layout, so placing the 16 stored
    // bits in the top half of a 32-bit pattern (low mantissa bits zero)
    // reconstructs the exact float value.
    const uint32_t bits = (uint32_t)h.bits << 16;
    float result;
    memcpy(&result, &bits, sizeof(result)); // strict-aliasing-safe bit cast
    return result;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Converts float32 to brain16.
|
|
|
|
*
|
2024-08-02 21:11:39 +02:00
|
|
|
* This is binary identical with Google Brain float conversion.
|
|
|
|
* Floats shall round to nearest even, and NANs shall be quiet.
|
|
|
|
* Subnormals aren't flushed to zero, except perhaps when used.
|
2024-05-08 08:30:09 +02:00
|
|
|
* This code should vectorize nicely if using modern compilers.
|
|
|
|
*/
|
|
|
|
static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
    ggml_bf16_t h;
    union {
        float f;
        uint32_t i;
    } u;
    u.f = s;
    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
        h.bits = (u.i >> 16) | 64; /* force to quiet */
        return h;
    }
    // Round to nearest even: add 0x7fff plus the lowest bit that will be
    // kept, then truncate to the top 16 bits.
    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
    return h;
}
|
|
|
|
|
|
|
|
#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
|
|
|
|
#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
|
|
|
|
|
2023-10-30 18:19:15 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// static_assert should be a #define, but if it's not,
|
|
|
|
// fall back to the _Static_assert C11 keyword.
|
|
|
|
// if C99 - static_assert is noop
|
|
|
|
// ref: https://stackoverflow.com/a/53923785/4039976
|
2024-02-05 13:09:47 +01:00
|
|
|
#ifndef __cplusplus
|
2023-10-30 18:19:15 +01:00
|
|
|
#ifndef static_assert
|
|
|
|
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
|
|
|
|
#define static_assert(cond, msg) _Static_assert(cond, msg)
|
|
|
|
#else
|
|
|
|
#define static_assert(cond, msg) struct global_scope_noop_trick
|
|
|
|
#endif
|
|
|
|
#endif
|
2024-02-05 13:09:47 +01:00
|
|
|
#endif
|
2023-10-30 18:19:15 +01:00
|
|
|
|
|
|
|
// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
|
|
|
|
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
|
|
|
|
#ifndef __FMA__
|
|
|
|
#define __FMA__
|
|
|
|
#endif
|
|
|
|
#ifndef __F16C__
|
|
|
|
#define __F16C__
|
|
|
|
#endif
|
2024-05-08 17:33:43 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available
|
|
|
|
#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__))
|
2023-10-30 18:19:15 +01:00
|
|
|
#ifndef __SSE3__
|
|
|
|
#define __SSE3__
|
|
|
|
#endif
|
2024-05-08 17:33:43 +02:00
|
|
|
#ifndef __SSSE3__
|
|
|
|
#define __SSSE3__
|
|
|
|
#endif
|
2023-10-30 18:19:15 +01:00
|
|
|
#endif
|
|
|
|
|
2024-05-25 10:42:31 +02:00
|
|
|
#if defined(__ARM_FEATURE_SVE)
|
|
|
|
#include <arm_sve.h>
|
2024-08-03 18:34:41 +02:00
|
|
|
#include <sys/prctl.h>
|
2024-05-25 10:42:31 +02:00
|
|
|
#endif
|
|
|
|
|
2023-10-30 18:19:15 +01:00
|
|
|
// 16-bit float
|
|
|
|
// on Arm, we use __fp16
|
|
|
|
// on x86, we use uint16_t
|
2024-04-24 11:00:07 +02:00
|
|
|
#if defined(__ARM_NEON)
|
2023-10-30 18:19:15 +01:00
|
|
|
|
|
|
|
// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
|
|
|
|
//
|
|
|
|
// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
|
|
|
|
//
|
|
|
|
#include <arm_neon.h>
|
|
|
|
|
2024-04-24 11:00:07 +02:00
|
|
|
#ifdef _MSC_VER
|
|
|
|
|
|
|
|
typedef uint16_t ggml_fp16_internal_t;
|
|
|
|
|
|
|
|
#define ggml_vld1q_u32(w,x,y,z) { ((w) + ((uint64_t)(x) << 32)), ((y) + ((uint64_t)(z) << 32)) }
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2024-03-11 10:28:51 +01:00
|
|
|
typedef __fp16 ggml_fp16_internal_t;
|
|
|
|
|
2024-04-24 11:00:07 +02:00
|
|
|
#define ggml_vld1q_u32(w,x,y,z) { (w), (x), (y), (z) }
|
|
|
|
|
|
|
|
#endif // _MSC_VER
|
|
|
|
|
|
|
|
#if !defined(__aarch64__)
|
|
|
|
|
|
|
|
// 32-bit ARM compatibility
|
|
|
|
|
ggml-quants : ternary packing for TriLMs and BitNet b1.58 (#8151)
* ggml-quants : 1.625 bpw ternary packing for BitNet 1.58b
* ggml-quants : faster 1.625 bpw AVX2 vec_dot
Not using a lookup table anymore makes it match q4_0 speed.
* gguf-py : fix formatting
* llama : remove spaces on empty line
* ggml-quants : subtract 1 when back in epi8
This makes the 1.625 bpw type go faster than q4_0. Still not the fastest.
* ggml-quants : Q2_2 now faster than Q4_K on with AVX2
* ggml-quants : cleanup Q1_3 code formatting
* ggml-quants : ARM NEON vec_dot for q2_2 and q1_3
* ggml-quants : use ceiling division when quantizing q1_3
* convert-hf : simplify BitNet pre-quantization
This still results in the exact same tensor weights and scales,
but it reveals some weirdness in the current algorithm.
* convert-hf : allow converting the weird BitNet 1.3B
Its FFN size is 5460 which is not convenient.
The offending tensors are kept in F16,
which makes the final model 5.01 bpw.
* bitnet : replace 1.58b with b1.58, as in the paper
* ggml-quants : fix build failure on Windows
* ggml-quants : attempt to fix Arm 32-bit support
* ggml : add some informative comments in q1_3 vec_dot
* ggml : add TQ1_0 and TQ2_0 ternary quantization types
* ggml : even faster TQ2_0
* ggml : also faster TQ1_0
Same optimization as for TQ2_0 by offsetting the sum instead of the weights.
This makes TQ1_0 almost as fast as Q8_0 on AVX2.
* ggml : fix build issues in certain environments
* ggml : add NEON vec_dot implementation for TQ1_0 and TQ2_0
* ggml : avoid directly using vmlal_high_s8, for 32-bit ARM compat
The compiler seems smart enough to use the same instruction
even when using vget_high_s8 instead.
* ggml : remove q1_3 and q2_2
No more 1.625 bpw and 2.000 bpw,
now instead using 1.6875 bpw and 2.0625 bpw
with TQ1_0 and TQ2_0, respectively.
* llama : remove the separate scale tensors of BitNet b1.58
They won't be needed, since the remaining ternary quant types have
built-in scales.
* ggml-quants : rename fields of TQ1_0 and TQ2_0 structs for consistency
* ggml-quants : allow using vdotq_s32 in TQ2_0 vec_dot
Not yet tested on hardware which supports it,
might not work or might not even compile. But also it might.
It should make the performance better on recent ARM CPUs.
* ggml-quants : remove comment about possible format change of TQ2_0
Making it slightly more convenient for AVX512
but less convenient for everything else is not worth the trouble.
* gguf-py : Numpy (de)quantization for TQ1_0 and TQ2_0
* ggml-quants : use roundf instead of nearest_int for TQ1_0 and TQ2_0
This does not change anything for ternary models,
since their values should never end up being in halfway cases anyway.
* convert : allow direct conversion to TQ1_0 and TQ2_0
The token embeddings and output tensors are kept in F16
to allow quantizing them to Q4_K and Q6_K with llama-quantize.
* llama : handle fallback for TQ1_0 and TQ2_0 with Q4_0
Q4_0 is not completely symmetric (so not lossless for ternary models),
but it should be good enough.
* ggml-quants : allow using ARM dot product instructions for TQ1_0
* ggml-quants : deduplicate TQ1_0 and TQ2_0 __ARM_FEATURE_DOTPROD support
* ggml : remove unused ggml_mul special case
It would otherwise conflict with the more general
optimization coming with Mamba-2.
* ggml : handle TQ1_0 and TQ2_0 in dequantization-based operators
* test-backend-ops : add TQ1_0 and TQ2_0 comments for later
Not yet adding uncommented, because some backends like SYCL and Metal
do not properly handle unknown types in supports_op for GGML_OP_MUL_MAT.
(and Metal also doesn't handle it with GGML_OP_GET_ROWS)
Support for TQ1_0 and TQ2_0 for other backends than CPU
will be added in follow-up pull requests.
2024-09-06 03:48:47 +02:00
|
|
|
// vaddlvq_s16
|
2024-04-24 11:00:07 +02:00
|
|
|
// vpaddq_s16
|
|
|
|
// vpaddq_s32
|
|
|
|
// vaddvq_s32
|
|
|
|
// vaddvq_f32
|
|
|
|
// vmaxvq_f32
|
|
|
|
// vcvtnq_s32_f32
|
|
|
|
// vzip1_u8
|
|
|
|
// vzip2_u8
|
|
|
|
|
ggml-quants : ternary packing for TriLMs and BitNet b1.58 (#8151)
* ggml-quants : 1.625 bpw ternary packing for BitNet 1.58b
* ggml-quants : faster 1.625 bpw AVX2 vec_dot
Not using a lookup table anymore makes it match q4_0 speed.
* gguf-py : fix formatting
* llama : remove spaces on empty line
* ggml-quants : subtract 1 when back in epi8
This makes the 1.625 bpw type go faster than q4_0. Still not the fastest.
* ggml-quants : Q2_2 now faster than Q4_K on with AVX2
* ggml-quants : cleanup Q1_3 code formatting
* ggml-quants : ARM NEON vec_dot for q2_2 and q1_3
* ggml-quants : use ceiling division when quantizing q1_3
* convert-hf : simplify BitNet pre-quantization
This still results in the exact same tensor weights and scales,
but it reveals some weirdness in the current algorithm.
* convert-hf : allow converting the weird BitNet 1.3B
Its FFN size is 5460 which is not convenient.
The offending tensors are kept in F16,
which makes the final model 5.01 bpw.
* bitnet : replace 1.58b with b1.58, as in the paper
* ggml-quants : fix build failure on Windows
* ggml-quants : attempt to fix Arm 32-bit support
* ggml : add some informative comments in q1_3 vec_dot
* ggml : add TQ1_0 and TQ2_0 ternary quantization types
* ggml : even faster TQ2_0
* ggml : also faster TQ1_0
Same optimization as for TQ2_0 by offsetting the sum instead of the weights.
This makes TQ1_0 almost as fast as Q8_0 on AVX2.
* ggml : fix build issues in certain environments
* ggml : add NEON vec_dot implementation for TQ1_0 and TQ2_0
* ggml : avoid directly using vmlal_high_s8, for 32-bit ARM compat
The compiler seems smart enough to use the same instruction
even when using vget_high_s8 instead.
* ggml : remove q1_3 and q2_2
No more 1.625 bpw and 2.000 bpw,
now instead using 1.6875 bpw and 2.0625 bpw
with TQ1_0 and TQ2_0, respectively.
* llama : remove the separate scale tensors of BitNet b1.58
They won't be needed, since the remaining ternary quant types have
built-in scales.
* ggml-quants : rename fields of TQ1_0 and TQ2_0 structs for consistency
* ggml-quants : allow using vdotq_s32 in TQ2_0 vec_dot
Not yet tested on hardware which supports it,
might not work or might not even compile. But also it might.
It should make the performance better on recent ARM CPUs.
* ggml-quants : remove comment about possible format change of TQ2_0
Making it slightly more convenient for AVX512
but less convenient for everything else is not worth the trouble.
* gguf-py : Numpy (de)quantization for TQ1_0 and TQ2_0
* ggml-quants : use roundf instead of nearest_int for TQ1_0 and TQ2_0
This does not change anything for ternary models,
since their values should never end up being in halfway cases anyway.
* convert : allow direct conversion to TQ1_0 and TQ2_0
The token embeddings and output tensors are kept in F16
to allow quantizing them to Q4_K and Q6_K with llama-quantize.
* llama : handle fallback for TQ1_0 and TQ2_0 with Q4_0
Q4_0 is not completely symmetric (so not lossless for ternary models),
but it should be good enough.
* ggml-quants : allow using ARM dot product instructions for TQ1_0
* ggml-quants : deduplicate TQ1_0 and TQ2_0 __ARM_FEATURE_DOTPROD support
* ggml : remove unused ggml_mul special case
It would otherwise conflict with the more general
optimization coming with Mamba-2.
* ggml : handle TQ1_0 and TQ2_0 in dequantization-based operators
* test-backend-ops : add TQ1_0 and TQ2_0 comments for later
Not yet adding uncommented, because some backends like SYCL and Metal
do not properly handle unknown types in supports_op for GGML_OP_MUL_MAT.
(and Metal also doesn't handle it with GGML_OP_GET_ROWS)
Support for TQ1_0 and TQ2_0 for other backends than CPU
will be added in follow-up pull requests.
2024-09-06 03:48:47 +02:00
|
|
|
inline static int32_t vaddlvq_s16(int16x8_t v) {
    // Widen the eight 16-bit lanes pairwise into four 32-bit sums,
    // then add the four lanes scalar-side.
    const int32x4_t sums = vpaddlq_s16(v);
    return vgetq_lane_s32(sums, 0) + vgetq_lane_s32(sums, 1) +
           vgetq_lane_s32(sums, 2) + vgetq_lane_s32(sums, 3);
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vpaddq_s16: pairwise add within each input,
// results of a in the low half, results of b in the high half.
inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); // pairwise sums of a
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); // pairwise sums of b
    return vcombine_s16(a0, b0);
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vpaddq_s32: pairwise add within each input,
// results of a in the low half, results of b in the high half.
inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a)); // pairwise sums of a
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b)); // pairwise sums of b
    return vcombine_s32(a0, b0);
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vaddvq_s32: horizontal sum of all four lanes.
inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vaddvq_f32: horizontal sum of all four lanes.
// NOTE(review): scalar summation order may differ from hardware lane
// reduction, so results can differ in the last ulp.
inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vmaxvq_f32: horizontal maximum of all four lanes.
inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vcvtnq_s32_f32: convert each float lane to int32.
// NOTE(review): roundf rounds halfway cases away from zero, while the real
// intrinsic rounds ties to even — assumes callers never hit exact .5 values.
inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vzip1_u8: interleave the LOW halves of a and b
// (a0, b0, a1, b1, a2, b2, a3, b3).
inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}
|
|
|
|
|
|
|
|
// Emulates AArch64 vzip2_u8: interleave the HIGH halves of a and b
// (a4, b4, a5, b5, a6, b6, a7, b7).
inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}
|
|
|
|
|
|
|
|
// vld1q_s16_x2
|
|
|
|
// vld1q_u8_x2
|
|
|
|
// vld1q_u8_x4
|
|
|
|
// vld1q_s8_x2
|
|
|
|
// vld1q_s8_x4
|
|
|
|
// TODO: double-check these work correctly
|
|
|
|
|
|
|
|
typedef struct ggml_int16x8x2_t {
|
|
|
|
int16x8_t val[2];
|
|
|
|
} ggml_int16x8x2_t;
|
|
|
|
|
|
|
|
inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
|
|
|
|
ggml_int16x8x2_t res;
|
|
|
|
|
|
|
|
res.val[0] = vld1q_s16(ptr + 0);
|
|
|
|
res.val[1] = vld1q_s16(ptr + 8);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct ggml_uint8x16x2_t {
|
|
|
|
uint8x16_t val[2];
|
|
|
|
} ggml_uint8x16x2_t;
|
|
|
|
|
|
|
|
inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
|
|
|
|
ggml_uint8x16x2_t res;
|
|
|
|
|
|
|
|
res.val[0] = vld1q_u8(ptr + 0);
|
|
|
|
res.val[1] = vld1q_u8(ptr + 16);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct ggml_uint8x16x4_t {
|
|
|
|
uint8x16_t val[4];
|
|
|
|
} ggml_uint8x16x4_t;
|
|
|
|
|
|
|
|
inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
|
|
|
|
ggml_uint8x16x4_t res;
|
|
|
|
|
|
|
|
res.val[0] = vld1q_u8(ptr + 0);
|
|
|
|
res.val[1] = vld1q_u8(ptr + 16);
|
|
|
|
res.val[2] = vld1q_u8(ptr + 32);
|
|
|
|
res.val[3] = vld1q_u8(ptr + 48);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct ggml_int8x16x2_t {
|
|
|
|
int8x16_t val[2];
|
|
|
|
} ggml_int8x16x2_t;
|
|
|
|
|
|
|
|
inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
|
|
|
|
ggml_int8x16x2_t res;
|
|
|
|
|
|
|
|
res.val[0] = vld1q_s8(ptr + 0);
|
|
|
|
res.val[1] = vld1q_s8(ptr + 16);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct ggml_int8x16x4_t {
|
|
|
|
int8x16_t val[4];
|
|
|
|
} ggml_int8x16x4_t;
|
|
|
|
|
|
|
|
inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
|
|
|
|
ggml_int8x16x4_t res;
|
|
|
|
|
|
|
|
res.val[0] = vld1q_s8(ptr + 0);
|
|
|
|
res.val[1] = vld1q_s8(ptr + 16);
|
|
|
|
res.val[2] = vld1q_s8(ptr + 32);
|
|
|
|
res.val[3] = vld1q_s8(ptr + 48);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
// NOTE: not tested
|
|
|
|
// Emulates AArch64 vqtbl1q_s8 (table lookup) on 32-bit ARM.
// Indices >= 16 yield 0, matching the hardware TBL semantics; the previous
// version indexed the table lane out of range for such indices.
inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
    int8x16_t res;

    for (int k = 0; k < 16; ++k) {
        const uint8_t idx = b[k];
        res[k] = (idx < 16) ? a[idx] : 0;
    }

    return res;
}
|
|
|
|
|
|
|
|
// NOTE: not tested
|
|
|
|
// Emulates AArch64 vqtbl1q_u8 (table lookup) on 32-bit ARM.
// Indices >= 16 yield 0, matching the hardware TBL semantics; the previous
// version indexed the table lane out of range for such indices.
inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
    uint8x16_t res;

    for (int k = 0; k < 16; ++k) {
        const uint8_t idx = b[k];
        res[k] = (idx < 16) ? a[idx] : 0;
    }

    return res;
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define ggml_int16x8x2_t int16x8x2_t
|
|
|
|
#define ggml_uint8x16x2_t uint8x16x2_t
|
|
|
|
#define ggml_uint8x16x4_t uint8x16x4_t
|
|
|
|
#define ggml_int8x16x2_t int8x16x2_t
|
|
|
|
#define ggml_int8x16x4_t int8x16x4_t
|
|
|
|
|
|
|
|
#define ggml_vld1q_s16_x2 vld1q_s16_x2
|
|
|
|
#define ggml_vld1q_u8_x2 vld1q_u8_x2
|
|
|
|
#define ggml_vld1q_u8_x4 vld1q_u8_x4
|
|
|
|
#define ggml_vld1q_s8_x2 vld1q_s8_x2
|
|
|
|
#define ggml_vld1q_s8_x4 vld1q_s8_x4
|
|
|
|
#define ggml_vqtbl1q_s8 vqtbl1q_s8
|
|
|
|
#define ggml_vqtbl1q_u8 vqtbl1q_u8
|
|
|
|
|
|
|
|
#endif // !defined(__aarch64__)
|
|
|
|
|
|
|
|
#if !defined(__ARM_FEATURE_DOTPROD)
|
|
|
|
|
|
|
|
// Emulates vdotq_s32 (signed 8-bit dot product with accumulate) for targets
// without __ARM_FEATURE_DOTPROD.
inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b)); // products of low 8 lanes
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b)); // products of high 8 lanes

    // Pairwise-widen the 16-bit products to 32-bit and accumulate into acc.
    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)
|
|
|
|
|
|
|
|
#endif // !defined(__ARM_FEATURE_DOTPROD)
|
|
|
|
|
|
|
|
#endif // defined(__ARM_NEON)
|
|
|
|
|
2024-04-29 16:55:02 +02:00
|
|
|
#if defined(__ARM_NEON) && !defined(_MSC_VER)
|
2024-04-24 11:00:07 +02:00
|
|
|
|
2024-02-22 22:21:39 +01:00
|
|
|
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
|
|
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
|
|
|
|
|
|
#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
|
|
|
|
|
|
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    // Copy the raw 16 bits into the native half-precision type and let the
    // compiler widen it to float.
    ggml_fp16_internal_t tmp;
    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
    return (float)tmp;
}
|
2023-10-30 18:19:15 +01:00
|
|
|
|
2024-02-22 22:21:39 +01:00
|
|
|
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    ggml_fp16_t res;
    // Narrow to the native half-precision type, then copy out its raw 16 bits.
    ggml_fp16_internal_t tmp = f;
    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
    return res;
}
|
2023-10-30 18:19:15 +01:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
#ifdef __wasm_simd128__
|
|
|
|
#include <wasm_simd128.h>
|
|
|
|
#else
|
|
|
|
#ifdef __POWER9_VECTOR__
|
|
|
|
#include <altivec.h>
|
|
|
|
#undef bool
|
|
|
|
#define bool _Bool
|
|
|
|
#else
|
|
|
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
|
|
|
#include <intrin.h>
|
|
|
|
#else
|
ggml : add llamafile sgemm (#6414)
This change upstreams llamafile's cpu matrix multiplication kernels
which improve image and prompt evaluation speed. For starters, Q4_0
and Q8_0 weights should go ~40% faster on CPU. The biggest benefits
are with data types like f16 / f32, which process prompts 2x faster
thus making them faster than quantized data types for prompt evals.
This change also introduces bona fide AVX512 support since tinyBLAS
is able to exploit the larger register file. For example, on my CPU
llama.cpp llava-cli processes an image prompt at 305 tokens/second,
using the Q4_K and Q4_0 types, which has always been faster than if
we used f16 LLaVA weights, which at HEAD go 188 tokens/second. With
this change, f16 LLaVA performance leap frogs to 464 tokens/second.
On Intel Core i9-14900K this change improves F16 prompt perf by 5x.
For example, using llama.cpp at HEAD with Mistral 7b f16 to process
a 215 token prompt will go 13 tok/sec. This change has fixes making
it go 52 tok/sec. It's mostly thanks to my vectorized outer product
kernels but also because I added support for correctly counting the
number of cores on Alderlake, so the default thread count discounts
Intel's new efficiency cores. Only Linux right now can count cores.
This work was sponsored by Mozilla who's given permission to change
the license of this code from Apache 2.0 to MIT. To read more about
what's improved, and how it works, see: https://justine.lol/matmul/
2024-04-16 20:55:30 +02:00
|
|
|
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__)
|
2023-10-30 18:19:15 +01:00
|
|
|
#if !defined(__riscv)
|
|
|
|
#include <immintrin.h>
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef __riscv_v_intrinsic
|
|
|
|
#include <riscv_vector.h>
|
|
|
|
#endif
|
|
|
|
|
2024-05-20 09:19:21 +02:00
|
|
|
#if defined(__loongarch64)
|
|
|
|
#if defined(__loongarch_asx)
|
|
|
|
#include <lasxintrin.h>
|
|
|
|
#endif
|
|
|
|
#if defined(__loongarch_sx)
|
|
|
|
#include <lsxintrin.h>
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(__loongarch_asx)
|
|
|
|
|
|
|
|
typedef union {
|
|
|
|
int32_t i;
|
|
|
|
float f;
|
|
|
|
} ft_union;
|
|
|
|
|
|
|
|
/* float type data load instructions */
|
|
|
|
// Broadcast a float to all lanes of a 128-bit LSX vector by replicating
// its raw bits through the integer replicate instruction.
static __m128 __lsx_vreplfr2vr_s(float val) {
    ft_union fi_tmpval = {.f = val}; // reinterpret float bits as int32
    return (__m128)__lsx_vreplgr2vr_w(fi_tmpval.i);
}
|
|
|
|
|
|
|
|
// Broadcast a float to all lanes of a 256-bit LASX vector by replicating
// its raw bits through the integer replicate instruction.
static __m256 __lasx_xvreplfr2vr_s(float val) {
    ft_union fi_tmpval = {.f = val}; // reinterpret float bits as int32
    return (__m256)__lasx_xvreplgr2vr_w(fi_tmpval.i);
}
|
|
|
|
#endif
|
|
|
|
|
2023-10-30 18:19:15 +01:00
|
|
|
#ifdef __F16C__
|
|
|
|
|
|
|
|
#ifdef _MSC_VER
|
|
|
|
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
|
|
|
|
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
|
|
|
|
#else
|
|
|
|
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
|
|
|
|
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#elif defined(__POWER9_VECTOR__)
|
|
|
|
|
|
|
|
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
|
|
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
|
|
/* the inline asm below is about 12% faster than the lookup method */
|
|
|
|
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
|
|
|
|
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
|
|
|
|
|
|
|
|
// POWER9 VSX conversion: move the 16 raw bits into a vector-scalar
// register, expand half -> double (xscvhpdp), round to single (frsp).
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */ "=f"(f):
        /* in */ "r"(h));
    return f;
}
|
|
|
|
|
|
|
|
// POWER9 VSX conversion: convert to half precision (xscvdphp) and move
// the resulting 16 bits back to a general-purpose register (mffprd).
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */ "=r"(r):
        /* in */ "f"(f));
    return r;
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
// FP16 <-> FP32
|
|
|
|
// ref: https://github.com/Maratyszcza/FP16
|
|
|
|
|
|
|
|
// Reinterpret a raw IEEE binary32 bit pattern as a float.
static inline float fp32_from_bits(uint32_t w) {
    float value;
    memcpy(&value, &w, sizeof value); // strict-aliasing-safe bit cast
    return value;
}
|
|
|
|
|
|
|
|
// Extract the raw IEEE binary32 bit pattern of a float.
static inline uint32_t fp32_to_bits(float f) {
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits); // strict-aliasing-safe bit cast
    return bits;
}
|
|
|
|
|
|
|
|
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    // Place the half bits in the top of a 32-bit word.
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w; // shifts left by 1, dropping the sign bit

    // Normalized path: move exponent+mantissa into the binary32 field
    // positions and rescale by 2^-112 to correct the exponent bias.
    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    // Denormal path: construct the value as an exact float addition and
    // subtract the magic bias afterwards.
    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    // Select the denormal path for tiny magnitudes, then reattach the sign.
    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
|
|
|
|
|
|
|
|
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    // Scale up then down so that rounding happens at half precision.
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w; // drops the sign bit
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    // Clamp the bias so subnormal halves round at the correct position.
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    // Adding the biased base performs round-to-nearest-even in float math.
    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    // Overflow/NaN inputs map to the canonical quiet NaN 0x7E00.
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
|
|
|
|
|
|
|
|
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
|
|
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
|
|
|
|
|
|
#endif // __F16C__
|
|
|
|
|
2024-04-24 11:00:07 +02:00
|
|
|
#endif // defined(__ARM_NEON) && !defined(_MSC_VER)
|
2023-10-30 18:19:15 +01:00
|
|
|
|
2024-07-10 14:14:51 +02:00
|
|
|
#ifdef __ARM_FEATURE_SVE
|
|
|
|
#include <arm_sve.h>
|
|
|
|
#endif // __ARM_FEATURE_SVE
|
|
|
|
|
2023-10-30 18:19:15 +01:00
|
|
|
// precomputed f32 table for f16 (256 KB)
|
|
|
|
// defined in ggml.c, initialized in ggml_init()
|
|
|
|
extern float ggml_table_f32_f16[1 << 16];
|
|
|
|
|
|
|
|
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
|
|
|
|
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
|
|
|
|
// This is also true for POWER9.
|
2024-02-22 22:21:39 +01:00
|
|
|
#if !defined(GGML_FP16_TO_FP32)
|
2023-10-30 18:19:15 +01:00
|
|
|
// Table-based fp16 -> fp32 conversion: the raw 16 bits index the
// precomputed 1<<16 entry ggml_table_f32_f16 (initialized in ggml_init()).
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return ggml_table_f32_f16[s];
}
|
|
|
|
|
|
|
|
#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
|
2024-02-22 22:21:39 +01:00
|
|
|
#endif
|
2023-10-30 18:19:15 +01:00
|
|
|
|
2024-02-22 22:21:39 +01:00
|
|
|
#if !defined(GGML_FP32_TO_FP16)
|
|
|
|
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
|
2023-10-30 18:19:15 +01:00
|
|
|
#endif
|
|
|
|
|
2024-07-27 04:41:55 +02:00
|
|
|
// bitset
|
|
|
|
|
|
|
|
static_assert(sizeof(ggml_bitset_t) == 4, "bitset_t constants must be updated");
|
|
|
|
#define BITSET_SHR 5 // log2(sizeof(ggml_bitset_t)*8)
|
|
|
|
#define BITSET_MASK (sizeof(ggml_bitset_t)*8 - 1)
|
|
|
|
|
|
|
|
// Number of ggml_bitset_t words needed to hold n bits (rounds up).
static size_t ggml_bitset_size(size_t n) {
    return (n + BITSET_MASK) >> BITSET_SHR;
}
|
|
|
|
|
|
|
|
// Returns true if bit i is set.
static inline bool ggml_bitset_get(const ggml_bitset_t * bitset, size_t i) {
    return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
}
|
|
|
|
|
|
|
|
// Sets bit i.
static inline void ggml_bitset_set(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
}
|
|
|
|
|
|
|
|
// Clears bit i.
static inline void ggml_bitset_clear(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
}
|
|
|
|
|
|
|
|
// hash set
|
|
|
|
|
|
|
|
#define GGML_HASHSET_FULL ((size_t)-1)
|
|
|
|
#define GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)
|
2023-11-13 13:16:23 +01:00
|
|
|
|
2024-01-12 20:07:38 +01:00
|
|
|
struct ggml_hash_set ggml_hash_set_new(size_t size);
|
2024-07-27 04:41:55 +02:00
|
|
|
void ggml_hash_set_free(struct ggml_hash_set * hash_set);
|
|
|
|
|
|
|
|
// returns the minimum size for a hash set that can hold min_sz elements
|
|
|
|
size_t ggml_hash_size(size_t min_sz);
|
2024-01-12 20:07:38 +01:00
|
|
|
|
2024-07-27 04:41:55 +02:00
|
|
|
// remove all elements from the hash set
|
|
|
|
void ggml_hash_set_reset(struct ggml_hash_set * hash_set);
|
2023-11-13 13:16:23 +01:00
|
|
|
|
2024-07-27 04:41:55 +02:00
|
|
|
// returns true if key is in the hash set
|
|
|
|
static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
|
2023-11-13 13:16:23 +01:00
|
|
|
|
2024-07-27 04:41:55 +02:00
|
|
|
// returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
|
|
|
|
static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);
|
|
|
|
|
|
|
|
// returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
|
|
|
|
static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);
|
2023-11-13 13:16:23 +01:00
|
|
|
|
|
|
|
// return index, asserts if table is full
|
2024-07-27 04:41:55 +02:00
|
|
|
static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);
|
|
|
|
|
|
|
|
// hash function for ggml_tensor
|
|
|
|
// Pointer-identity hash for tensors.
static inline size_t ggml_hash(const struct ggml_tensor * p) {
    // the last 4 bits are always zero due to alignment
    return (size_t)(uintptr_t)p >> 4;
}
|
|
|
|
|
|
|
|
static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
|
|
|
|
size_t h = ggml_hash(key) % hash_set->size;
|
|
|
|
|
|
|
|
// linear probing
|
|
|
|
size_t i = h;
|
|
|
|
while (ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
|
|
|
|
i = (i + 1) % hash_set->size;
|
|
|
|
if (i == h) {
|
|
|
|
// visited all hash table entries -> not found
|
|
|
|
return GGML_HASHSET_FULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
|
|
|
|
size_t i = ggml_hash_find(hash_set, key);
|
|
|
|
return i != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Inserts key via linear probing; returns its slot index,
// GGML_HASHSET_ALREADY_EXISTS if present, aborts if the table is full.
static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            // free slot: claim it and store the key
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return GGML_HASHSET_ALREADY_EXISTS;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> not found
    GGML_ABORT("fatal error");
}
|
|
|
|
|
|
|
|
// Like ggml_hash_insert, but returns the existing slot index when the key
// is already present; aborts if the table is full.
static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            // free slot: claim it and store the key
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            // key already present: return its slot
            return i;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> not found
    GGML_ABORT("fatal error");
}
|
2023-10-30 18:19:15 +01:00
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|