#ifndef LLAMA_H
#define LLAMA_H

#include "ggml.h"
#include "ggml-cpu.h"
#include "ggml-backend.h"

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAMA_API __declspec(dllexport)
#        else
#            define LLAMA_API __declspec(dllimport)
#        endif
#    else
#        define LLAMA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAMA_API
#endif

#ifdef __GNUC__
#    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define DEPRECATED(func, hint) func
#endif

#define LLAMA_DEFAULT_SEED 0xFFFFFFFF

// TODO: use everywhere in the implementation
#define LLAMA_TOKEN_NULL -1

#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
#define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'

#define LLAMA_SESSION_MAGIC   LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 9

#define LLAMA_STATE_SEQ_MAGIC   LLAMA_FILE_MAGIC_GGSQ
#define LLAMA_STATE_SEQ_VERSION 2

#ifdef __cplusplus
extern "C" {
#endif

//
// C interface
//
// TODO: show sample usage
//
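
// Illustrative usage sketch (not normative; "model.gguf" and the parameter
// values below are placeholders, and tokenization/decoding/sampling steps are
// elided):
//
//     llama_backend_init();
//
//     struct llama_model_params mparams = llama_model_default_params();
//     struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
//
//     struct llama_context_params cparams = llama_context_default_params();
//     cparams.n_ctx = 4096; // example value; 0 means "take the context size from the model"
//     struct llama_context * ctx = llama_new_context_with_model(model, cparams);
//
//     // ... tokenize the prompt, submit llama_batch objects via llama_decode(),
//     //     and sample tokens with a llama_sampler chain ...
//
//     llama_free(ctx);
//     llama_free_model(model);
//     llama_backend_free();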

// struct llama_vocab; // TODO: add in the future
struct llama_model;
struct llama_context;
struct llama_sampler;

typedef int32_t llama_pos;
typedef int32_t llama_token;
typedef int32_t llama_seq_id;

enum llama_vocab_type {
    LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
    LLAMA_VOCAB_TYPE_SPM  = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
    LLAMA_VOCAB_TYPE_BPE  = 2, // GPT-2 tokenizer based on byte-level BPE
    LLAMA_VOCAB_TYPE_WPM  = 3, // BERT tokenizer based on WordPiece
    LLAMA_VOCAB_TYPE_UGM  = 4, // T5 tokenizer based on Unigram
    LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
};

// pre-tokenization types
enum llama_vocab_pre_type {
    LLAMA_VOCAB_PRE_TYPE_DEFAULT        = 0,
    LLAMA_VOCAB_PRE_TYPE_LLAMA3         = 1,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM   = 2,
    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
    LLAMA_VOCAB_PRE_TYPE_FALCON         = 4,
    LLAMA_VOCAB_PRE_TYPE_MPT            = 5,
    LLAMA_VOCAB_PRE_TYPE_STARCODER      = 6,
    LLAMA_VOCAB_PRE_TYPE_GPT2           = 7,
    LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
    LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
    LLAMA_VOCAB_PRE_TYPE_STABLELM2      = 10,
    LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
    LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
    LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
    LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
    LLAMA_VOCAB_PRE_TYPE_PORO           = 15,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM3       = 16,
    LLAMA_VOCAB_PRE_TYPE_CHATGLM4       = 17,
    LLAMA_VOCAB_PRE_TYPE_VIKING         = 18,
    LLAMA_VOCAB_PRE_TYPE_JAIS           = 19,
    LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
    LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
    LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
    LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
    LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
    LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
    LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
};

enum llama_rope_type {
    LLAMA_ROPE_TYPE_NONE = -1,
    LLAMA_ROPE_TYPE_NORM = 0,
    LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
};

enum llama_token_type { // TODO: remove, required until per token attributes are available from GGUF file
    LLAMA_TOKEN_TYPE_UNDEFINED    = 0,
    LLAMA_TOKEN_TYPE_NORMAL       = 1,
    LLAMA_TOKEN_TYPE_UNKNOWN      = 2,
    LLAMA_TOKEN_TYPE_CONTROL      = 3,
    LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
    LLAMA_TOKEN_TYPE_UNUSED       = 5,
    LLAMA_TOKEN_TYPE_BYTE         = 6,
};

enum llama_token_attr {
    LLAMA_TOKEN_ATTR_UNDEFINED    = 0,
    LLAMA_TOKEN_ATTR_UNKNOWN      = 1 << 0,
    LLAMA_TOKEN_ATTR_UNUSED       = 1 << 1,
    LLAMA_TOKEN_ATTR_NORMAL       = 1 << 2,
    LLAMA_TOKEN_ATTR_CONTROL      = 1 << 3, // SPECIAL?
    LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
    LLAMA_TOKEN_ATTR_BYTE         = 1 << 5,
    LLAMA_TOKEN_ATTR_NORMALIZED   = 1 << 6,
    LLAMA_TOKEN_ATTR_LSTRIP       = 1 << 7,
    LLAMA_TOKEN_ATTR_RSTRIP       = 1 << 8,
    LLAMA_TOKEN_ATTR_SINGLE_WORD  = 1 << 9,
};
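
// The attribute values are bit flags and can be combined/tested with bitwise
// operators, e.g. (illustrative only, assuming `attr` was queried for some token):
//
//     if (attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN)) {
//         // treat the token as special and do not render it as plain text
//     }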

// model file types
enum llama_ftype {
    LLAMA_FTYPE_ALL_F32              = 0,
    LLAMA_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
    // LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
    // LLAMA_FTYPE_MOSTLY_Q4_2       = 5,  // support has been removed
    // LLAMA_FTYPE_MOSTLY_Q4_3       = 6,  // support has been removed
    LLAMA_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q3_K_S        = 11, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q3_K_M        = 12, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q3_K_L        = 13, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_K_S        = 14, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_K_M        = 15, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_K_S        = 16, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_K_M        = 17, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q6_K          = 18, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ2_XXS       = 19, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ2_XS        = 20, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q2_K_S        = 21, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ3_XS        = 22, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ3_XXS       = 23, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ1_S         = 24, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ4_NL        = 25, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ3_S         = 26, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ3_M         = 27, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ2_S         = 28, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ2_M         = 29, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ4_XS        = 30, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_IQ1_M         = 31, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_BF16          = 32, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0_4_4      = 33, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0_4_8      = 34, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0_8_8      = 35, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_TQ1_0         = 36, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_TQ2_0         = 37, // except 1d tensors

    LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
};

enum llama_rope_scaling_type {
    LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
    LLAMA_ROPE_SCALING_TYPE_NONE        = 0,
    LLAMA_ROPE_SCALING_TYPE_LINEAR      = 1,
    LLAMA_ROPE_SCALING_TYPE_YARN        = 2,
    LLAMA_ROPE_SCALING_TYPE_MAX_VALUE   = LLAMA_ROPE_SCALING_TYPE_YARN,
};

enum llama_pooling_type {
    LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
    LLAMA_POOLING_TYPE_NONE = 0,
    LLAMA_POOLING_TYPE_MEAN = 1,
    LLAMA_POOLING_TYPE_CLS  = 2,
    LLAMA_POOLING_TYPE_LAST = 3,
    LLAMA_POOLING_TYPE_RANK = 4, // used by reranking models to attach the classification head to the graph
};

enum llama_attention_type {
    LLAMA_ATTENTION_TYPE_UNSPECIFIED = -1,
    LLAMA_ATTENTION_TYPE_CAUSAL      = 0,
    LLAMA_ATTENTION_TYPE_NON_CAUSAL  = 1,
};

enum llama_split_mode {
    LLAMA_SPLIT_MODE_NONE  = 0, // single GPU
    LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
    LLAMA_SPLIT_MODE_ROW   = 2, // split layers and KV across GPUs, use tensor parallelism if supported
};

// TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979)
typedef struct llama_token_data {
    llama_token id;    // token id
    float       logit; // log-odds of the token
    float       p;     // probability of the token
} llama_token_data;

typedef struct llama_token_data_array {
    // TODO: consider SoA
    // NOTE: this pointer can be modified by the samplers
    llama_token_data * data;
    size_t size;
    int64_t selected; // this is the index in the data array (i.e. not the token id)
    bool sorted;
} llama_token_data_array;

typedef bool (*llama_progress_callback)(float progress, void * user_data);
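
// Illustrative callback that aborts loading once it is half done (the threshold
// is an arbitrary example; returning false cancels the load, see
// llama_model_params::progress_callback below):
//
//     static bool my_progress(float progress, void * user_data) {
//         (void) user_data;
//         return progress < 0.5f;
//     }
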
// Input data for llama_decode
// A llama_batch object can contain input about one or many sequences
// The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
//
// - token : the token ids of the input (used when embd is NULL)
// - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
// - pos : the positions of the respective token in the sequence
//     (if set to NULL, the token position will be tracked automatically by llama_decode)
// - seq_id : the sequence to which the respective token belongs
//     (if set to NULL, the sequence ID will be assumed to be 0)
// - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
//     (if set to NULL, only the logits for the last token will be returned)
//
typedef struct llama_batch {
    int32_t n_tokens;

    llama_token  *  token;
    float        *  embd;
    llama_pos    *  pos;
    int32_t      *  n_seq_id;
    llama_seq_id ** seq_id;
    int8_t       *  logits; // TODO: rename this to "output"
} llama_batch;
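
// Minimal illustrative sketch of a single-sequence batch (assumes `tokens` and
// `n_prompt` come from a prior tokenization step; pos, seq_id and logits are
// left NULL so the defaults described above apply):
//
//     struct llama_batch batch = {
//         /*.n_tokens =*/ n_prompt,
//         /*.token    =*/ tokens,
//         /*.embd     =*/ NULL,
//         /*.pos      =*/ NULL,
//         /*.n_seq_id =*/ NULL,
//         /*.seq_id   =*/ NULL,
//         /*.logits   =*/ NULL,
//     };
//
//     // the batch is then submitted with llama_decode(ctx, batch)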

enum llama_model_kv_override_type {
    LLAMA_KV_OVERRIDE_TYPE_INT,
    LLAMA_KV_OVERRIDE_TYPE_FLOAT,
    LLAMA_KV_OVERRIDE_TYPE_BOOL,
    LLAMA_KV_OVERRIDE_TYPE_STR,
};

struct llama_model_kv_override {
    enum llama_model_kv_override_type tag;

    char key[128];

    union {
        int64_t val_i64;
        double  val_f64;
        bool    val_bool;
        char    val_str[128];
    };
};
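
// Illustrative sketch of overriding one metadata value at load time (the key
// name is an arbitrary example, and terminating the list with a zeroed entry
// whose key is empty is an assumption about how the loader detects its end):
//
//     struct llama_model_kv_override overrides[2];
//     memset(overrides, 0, sizeof(overrides)); // overrides[1] stays empty -> terminator (assumption)
//
//     snprintf(overrides[0].key, sizeof(overrides[0].key), "%s", "some.metadata.key");
//     overrides[0].tag      = LLAMA_KV_OVERRIDE_TYPE_BOOL;
//     overrides[0].val_bool = true;
//
//     struct llama_model_params mparams = llama_model_default_params();
//     mparams.kv_overrides = overrides;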

struct llama_model_params {
    // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
    ggml_backend_dev_t * devices;

    int32_t n_gpu_layers; // number of layers to store in VRAM

    enum llama_split_mode split_mode; // how to split the model across multiple GPUs

    // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
    int32_t main_gpu;

    // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
    const float * tensor_split;

    // comma separated list of RPC servers to use for offloading
    const char * rpc_servers;

    // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
    // If the provided progress_callback returns true, model loading continues.
    // If it returns false, model loading is immediately aborted.
    llama_progress_callback progress_callback;

    // context pointer passed to the progress callback
    void * progress_callback_user_data;

    // override key-value pairs of the model meta data
    const struct llama_model_kv_override * kv_overrides;

    // Keep the booleans together to avoid misalignment during copy-by-value.
    bool vocab_only;    // only load the vocabulary, no weights
    bool use_mmap;      // use mmap if possible
    bool use_mlock;     // force system to keep model in RAM
    bool check_tensors; // validate model tensor data
};

// NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
// https://github.com/ggerganov/llama.cpp/pull/7544
struct llama_context_params {
    uint32_t n_ctx;     // text context, 0 = from model
    uint32_t n_batch;   // logical maximum batch size that can be submitted to llama_decode
    uint32_t n_ubatch;  // physical maximum batch size
    uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)

    int32_t n_threads;       // number of threads to use for generation
    int32_t n_threads_batch; // number of threads to use for batch processing

    enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
    enum llama_pooling_type      pooling_type;      // whether to pool (sum) embedding results by sequence id
    enum llama_attention_type    attention_type;    // attention type to use for embeddings

    // ref: https://github.com/ggerganov/llama.cpp/pull/2054
    float    rope_freq_base;   // RoPE base frequency, 0 = from model
    float    rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
    float    yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
    float    yarn_attn_factor; // YaRN magnitude scaling factor
    float    yarn_beta_fast;   // YaRN low correction dim
    float    yarn_beta_slow;   // YaRN high correction dim
    uint32_t yarn_orig_ctx;    // YaRN original context size
    float    defrag_thold;     // defragment the KV cache if holes/size > thold, < 0 disabled (default)

    ggml_backend_sched_eval_callback cb_eval;
    void * cb_eval_user_data;

    enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
    enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]

    // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
    // TODO: move at the end of the struct
    bool logits_all;  // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
    bool embeddings;  // if true, extract embeddings (together with logits)
    bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
    bool flash_attn;  // whether to use flash attention [EXPERIMENTAL]
    bool no_perf;     // whether to measure performance timings

    // Abort callback
    // if it returns true, execution of llama_decode() will be aborted
    // currently works only with CPU execution
    ggml_abort_callback abort_callback;
    void *              abort_callback_data;
};

// model quantization parameters
typedef struct llama_model_quantize_params {
    int32_t nthread;                     // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
    enum llama_ftype ftype;              // quantize to this llama_ftype
    enum ggml_type output_tensor_type;   // output tensor type
    enum ggml_type token_embedding_type; // token embeddings tensor type
    bool allow_requantize;               // allow quantizing non-f32/f16 tensors
    bool quantize_output_tensor;         // quantize output.weight
    bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
    bool pure;                           // quantize all tensors to the default type
    bool keep_split;                     // quantize to the same number of shards
    void * imatrix;                      // pointer to importance matrix data
    void * kv_overrides;                 // pointer to vector containing overrides
} llama_model_quantize_params;

typedef struct llama_logit_bias {
    llama_token token;
    float bias;
} llama_logit_bias;

typedef struct llama_sampler_chain_params {
    bool no_perf; // whether to measure performance timings
} llama_sampler_chain_params;

// used in chat template
typedef struct llama_chat_message {
    const char * role;
    const char * content;
} llama_chat_message;
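
// Illustrative sketch of describing a short conversation for chat-template
// processing (the strings are arbitrary example content):
//
//     const struct llama_chat_message messages[] = {
//         { /*.role =*/ "system", /*.content =*/ "You are a helpful assistant." },
//         { /*.role =*/ "user",   /*.content =*/ "Hello!"                       },
//     };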

// LoRA adapter
struct llama_lora_adapter;

// Helpers for getting default parameters
// TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172)
LLAMA_API struct llama_model_params          llama_model_default_params(void);
LLAMA_API struct llama_context_params        llama_context_default_params(void);
LLAMA_API struct llama_sampler_chain_params  llama_sampler_chain_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);

// Initialize the llama + ggml backend
// Call once at the start of the program
LLAMA_API void llama_backend_init(void);

// Optional: enable NUMA optimizations with the given strategy
LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);

// Optional: an auto threadpool gets created in ggml if not passed explicitly
LLAMA_API void llama_attach_threadpool(
        struct llama_context * ctx,
        ggml_threadpool_t      threadpool,
        ggml_threadpool_t      threadpool_batch);

LLAMA_API void llama_detach_threadpool(struct llama_context * ctx);

// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free(void);

LLAMA_API struct llama_model * llama_load_model_from_file(
        const char * path_model,
        struct llama_model_params params);

LLAMA_API void llama_free_model(struct llama_model * model);

// TODO: rename to llama_init_from_model
LLAMA_API struct llama_context * llama_new_context_with_model(
        struct llama_model * model,
        struct llama_context_params params);

// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);

LLAMA_API int64_t llama_time_us(void);

LLAMA_API size_t llama_max_devices(void);

LLAMA_API bool llama_supports_mmap       (void);
LLAMA_API bool llama_supports_mlock      (void);
LLAMA_API bool llama_supports_gpu_offload(void);
LLAMA_API bool llama_supports_rpc        (void);

LLAMA_API uint32_t llama_n_ctx    (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_batch  (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
LLAMA_API uint32_t llama_n_seq_max(const struct llama_context * ctx);

LLAMA_API int32_t llama_n_vocab    (const struct llama_model * model);
LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
LLAMA_API int32_t llama_n_embd     (const struct llama_model * model);
LLAMA_API int32_t llama_n_layer    (const struct llama_model * model);
LLAMA_API int32_t llama_n_head     (const struct llama_model * model);

LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);

LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
LLAMA_API enum llama_vocab_type   llama_vocab_type  (const struct llama_model * model);
LLAMA_API enum llama_rope_type    llama_rope_type   (const struct llama_model * model);

// Get the model's RoPE frequency scaling factor
LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);

// Functions to access the model's GGUF metadata scalar values
// - The functions return the length of the string on success, or -1 on failure
// - The output string is always null-terminated and cleared on failure
// - GGUF array values are not supported by these functions

// Get metadata value as a string by key name
LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);

// Get the number of metadata key/value pairs
LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);

// Get metadata key name by index
LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);

// Get metadata value as a string by index
LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
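
// Illustrative sketch of dumping all metadata key/value pairs (the buffer sizes
// are arbitrary example values):
//
//     char key[256];
//     char val[256];
//     const int32_t n_kv = llama_model_meta_count(model);
//     for (int32_t i = 0; i < n_kv; i++) {
//         if (llama_model_meta_key_by_index(model, i, key, sizeof(key)) >= 0 &&
//             llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
//             printf("%s = %s\n", key, val);
//         }
//     }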

// Get a string describing the model type
LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);

// Returns the total size of all the tensors in the model in bytes
LLAMA_API uint64_t llama_model_size(const struct llama_model * model);

// Returns the total number of parameters in the model
LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
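
// Illustrative sketch of printing a short model summary (the buffer size is an
// arbitrary example value):
//
//     char desc[128];
//     llama_model_desc(model, desc, sizeof(desc));
//     printf("%s | %.2f B params | %.2f GiB\n",
//            desc,
//            llama_model_n_params(model) / 1e9,
//            llama_model_size(model)     / (1024.0 * 1024.0 * 1024.0));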
2023-08-21 22:07:43 +02:00
train : finetune LORA (#2632)
* fix track_max_mem in forward_batch_wo_cache_flash_attn_train
* remove unnecessary Adam(W) optimizer tensors.
reduces optimizer memory overhead from 7*modelsize to 2*modelsize.
additionally allows to optimize models with more than 2^31 parameters by replacing int with int64_t.
bumps training checkpoint file version, but old checkpoints can still be read.
new version with less tensors is saved.
* add gradient clipping to AdamW
* Fix reset of unused g->nodes and g->grads to NULL
* implement gradient checkpointing for training
reduces memory overhead from O(n_layer) to O(sqrt(n_layer))
as explained in readme of https://github.com/cybertronai/gradient-checkpointing
* remove unused compute buffer 3
* add and use function ggml_build_backward_expand to avoid stack overflows with large maximum number of nodes
GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);
* change AdamW decay parameter to work like the torch AdamW decay parameter
It is now relative to Adam learning rate `alpha*sched`.
Before that it was relative to `sched` only.
`alpha` being the maximum learning rate and `sched` being a scaling parameter in [0..1]
* change default AdamW weight decay parameter used in training to 0.1 as used in nanoGPT
* change default AdamW weight decay parameter defined in ggml to 0.0, making Adam default instead of AdamW
btw: the default weight decay parameter for torch.optim.AdamW is 0.01
* bug fixes for cross entropy loss
ggml_cross_entropy_loss: sums where not correctly added in workload of each thread
ggml_cross_entropy_loss_back: simplify backward process, reducing numerical issues
guard usage of exp f16 lookup in cross entropy by #define GGML_CROSS_ENTROPY_EXP_FP16
cross entropy loss is only used once during training, but it is quite sensitive to numerical errors introduced by exp-f16-lookup.
so exp-f16-lookup for cross entropy loss is disabled by default, trading better gradients for very slightly worse runtime performance.
* fix test-grad0 for cross_entropy_loss
the second argument to cross_entropy_loss must sum up to 1 for each row
* fix test-grad0 for soft_max
dont use only sum as aggregation, because sum of softmax is always 1 -> finite differences should not work
instead use sum(log(soft_max()*(1-eps)+eps)); use eps to avoid log(0)
* improve finite differences of test-grad0 by using double instead of float
* change cross_entropy_loss to output average over all rows
this helps keeping the loss and gradients in a sane range
* improve gradient checkpointing
sqrt(n_layers) is only the best checkpoint step when mem size of checkpoints and mem size of layers are equal.
since layers require more memory than the single-tensor-checkpoint we use, the optimal values are compute different:
```
given: n, u, v
objective: minimize(a*u+b*v) where a*b=n, a>0, b>0
b=n/a
minimize(a*u+v*n/a)
diff(a*u+v*n/a, a) = u - (v*n/a)/a
diff(a*u+v*n/a, a) == 0
u - (v*n/a)/a == 0
u == v*n/(a*a)
u*a*a = v*n
a*a = v*n/u
a = sqrt(n*v/u)
```
this change results in more checkpoints, requiring less layers to store between checkpoints, overall improving memory usage.
* disable gradient checkpointing debug output
* llama : fix rope usage in train-text-from-scratch after ChatGLM change
* add more training parameters:
--enable-restart N Only for Adam optimizer. Enable restarts of cos-decay
--disable-restart N Only for Adam optimizer. Disable restarts of cos-decay
--opt-past N Number of optimization iterations to track for delta convergence test. Disabled when zero.
--opt-delta N Maximum delta for delta convergence test. Disabled when <= zero.
--opt-max-no-improvement N Maximum number of optimization iterations with no improvement. Disabled when <= zero.
--adam-epsf N AdamW epsilon for convergence test. Disabled when <= zero.
--adam-min-alpha N Adam minimum learning rate alpha, usually 0.1 * alpha
* replace memcpy with reshape operation so that the graph is not cut at the input
this makes it possible to store other values into the input tensor and then simply recompute the graph without rebuilding it
* remove unused function argument from get_example_targets_batch
* measure and print total training time
* add optimization callback to ggml_opt_resume_g
this callback is called before each iteration with custom data and pointer to learning schedule parameter (only used in Adam(W)).
can be used for dynamic learning schedule and setting input data for batches before each iteration
* use optimization callback in training
allows dynamic learning schedule and different batch data for each iteration without relying on low n_iter and high n_examples parameters
reduces runtime by avoiding restart of optimization function and improves training convergence by providing a different batch for each iteration
* add minimum number of tensor dimensions to apply weight decay (default 2)
this allows to not apply weight decay to bias parameters
* rename training parameter cos-decay-alpha to cos-decay-min and clarify that adam-min-alpha also applies to warmup
* fix increase of model.train_samples and model.train_tokens
now that each optimizer iteration gets its own batch we need to multiply by number of opt iterations
* change sampling parameters for prediction after training to defaults of common.h
and clarify what is context for prediction and what are generated tokens
* tighten abs error bounds for cross_entropy_loss in test-grad0
* add conditional compilation of using F16 exp in flash attention
uncomment `// #define GGML_FLASH_ATTN_EXP_FP16` to enable usage of f16 exp in flash attention
* tighten abs error bounds for flash_attn in test-grad0
* tighten abs error bounds for sqrt in test-grad0
* remove out-commented vectorized code of opt_adam
the vectorized code might be bit faster for low number of parameters, but it had a big memory usage overhead
* ggml : update ggml_rms_norm_back with configurable eps
* llama training : fix ggml_rms_norm_back calls to pass configurable eps
* remove trailing whitespace
* add train function using automatic gradient checkpointing backward pass and allocator
* in train function replace add_inplace by regular add
because using add_inplace seems to result in different gradients
* don't use allocate hash_map on context
because the context has no_alloc=True when using memory allocator resulting in NULL data pointers
* correctly clone reshape and permute operations by also cloning tensor->nb values
* fix variable name and add missing type cast
* terminate recursive tensor cloning when reaching tensor without src tensors
* correctly clone view tensors by setting data pointers
without this the checkpointing would only work when being used together with memory allocator
* fix variable names
* swap arguments to commutative ops to be the same as in `forward_batch_wo_cache_flash_attn`
* add input tensors as checkpoints
so that recursive tensor cloning of gradient checkpointing terminates on input tensors
* fix variable name and add missing boolean negation
* make sure some tensors are not reallocated by inserting new temporary nodes depending on them:
output and parameter gradient tensors need to be available at the end of the graph execution
parameter gradient tensors also need to be available before the graph execution because they are set to zero before each optimizer iteration
checkpoint tensors are allocated all together to reduce memory allocator fragmentation
afterwards, in addition to the temporary nodes, we also need to reset the temporary leafs
* fix ASSERT to work with zero layers
* add training options whether to use allocator and/or unified training function
* integrate unified training function which may use memory allocator
the unified training function also supports arguments whether to use flash attention and/or gradient checkpointing
* format name of cloned tensors with " (clone)" suffix
* set names for tensors in unified train function for easier debugging
* allocate graph on context using ggml_new_graph
* remove handwritten training functions
* remove unused training parameters "use_scratch" and "use_unified"
* remove trailing whitespace
* remove unused train params: mem_compute1_gb & mem_compute2_gb
mem_compute_gb is used for compute when automatic memory allocator is not enabled, otherwise it can be very small to only hold the tensor definitions
mem_compute0_gb is used for automatic memory allocator (as long as measurement of max required size is not implemented)
* remove unused forward_batch function
* add debug asserts in ggml_allocr_alloc to some common pitfalls when using this function directly
* only use ggml_allocr_alloc when tensor has NULL data and is no view
* fix test when to create temporary backward graph
temporary backward graph is only necessary when using checkpointing
* fix memory "leak" in optimizers
each iteration a new cplan with new memory for work data was allocated.
now cplan creation only happens at the start of optimization, with each iteration reusing the cplan and its work data.
* reverse order of for loop in ggml_build_backward_expand to save memory when using gradient checkpointing and allocator
with this loop order gradient checkpointing with allocator on 16 layer model saves 13% memory; 2 layer memory it saves 2% memory.
the computation results are the same
* add API functions to access llama model tensors
* add stub example for finetuning, based on train-text-from-scratch
* move and remove code
* add API functions to access remaining model parameters:
mult, head and rot
* first draft for LORA finetune training
* remove const model and layer arguments in API functions for accessing model tensors
* bug fixes to make finetune compile
automatic allocator does not work yet
* add debug prints for training memory improvements
* fix names of lora tensors
* avoid stack overflow resulting from big ggml_cgraph
replace stack allocation and ggml_build_forward by ggml_new_graph in combination with ggml_build_forward_expand
* replace llama API functions to get model tensors by one function to get model tensor by name
LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
* remove unused call to not existing llama_get_layer_from_model
* implement ggml_compute_forward_out_prod_q_f32
* remove trailing whitespace
* add lora finetune support on quantized base model tensors
* add ggml_add_cast API function
this function works like ggml_add, but accepts a data type for the resulting tensor.
only supported for quantized src0 input.
* use ggml_add_cast in finetuning
lora-applied weights will now have data type F32, which improves gradients when finetuning quantized base models
* bug fix: actually use result type passed to ggml_add_cast
* make sure base model tensors data cannot be used in viewable operations
memory allocator would try to make lora application inplace on base model tensors.
since those are memory mapped this will result in memory access violations
* fix bug in ggml_out_prod which resulted in wrong n_dims of result tensors
* avoid keeping in memory ALL of the gradients
The problem here stems from ggml_graph_reset. This function is called in the optimization function, before each graph computation, to reset the gradients to zero. This required a unique memory slot for each gradient: allocating memory from a previosly freed memory location might lead to non-zero input gradients.
During ggml_compute_backward the gradients are build stepwise by adding or substracting new values, starting from a OP_NONE tensor which needs to contain zero-values. This requires the graph reset.
To avoid this I now remember in ggml_build_backward_expand the original OP_NONE gradient tensors in a hash table, which is passed to ggml_compute_backward. There instead of using add (or sub or similar) I test whether the existing gradient to be changed is a zero-valued-tensor by looking up its existence in the hash table. When it is such a zero-tensor it will not be modified, but replaced by the value to be added, otherwise the regular add (not inplace, allocator will take care of this) will be used. This way none of those zero-tensor values will be necessary in the final backward graph and more importantly they won't need a unique memory slot, just to make them zero.
* remove trailing whitespace
* remove debug prints and function to compute tensor data hash
* improve optimization iteration prints
* adjust maximal values to support finetuning 3B models
* change default finetune params lora_r and lora_alpha to match the default n_rank parameter value of 4
* bug fix: make sure finetune input gradient is allocated at begin and kept until end
* remove unnecessary src tensor from ggml_get_rows_back
we don't need the data of src[2] for computation, only to set up the correct output shape.
remove dependency on src[2], so that allocator can work more freely.
the computational graph is still completely determined, because the output shape is naturally included.
this is similar to how ggml_reshape does it.
* remove unnecessary src tensor from ggml_repeat & ggml_repeat_back
we don't need the data of src[1] for computation, only to set up the correct output shape.
remove dependency on src[1], so that allocator can work more freely.
the computational graph is still completely determined, because the output shape is naturally included
* resolve todo
the allocator will only make an operation inplace when the tensors involved are of the same type
* mixing multiple LORA adapters is now possible
pass more than one '--lora FNAME' argument to apply more than one LORA.
use '--lora-scaled FNAME S' when you want to specify a user-defined scale for an adapter.
* add option to save finetune output every N iterations
* also save latest finetune output with ITERATION="LATEST" and print where files are saved
saving with LATEST makes it easier to resume training from the latest checkpoint
the string "LATEST" can be configured with command line option "--fn-latest STR"
* update checkpoint train stats before saving via "--save-every"
* add command line option `--rank-wo N` for rank of wo tensor
* update finetune README
* fix dump_non_result_info_yaml to output multiple lora adapters
* bug fix: replace GGML_TYPE_SIZE[t] by ggml_type_size(t)
* replace llama_n_mult by llama_n_ff
* finetune bug fixes to compile with merged in code from master
* remove prediction related code to reduce duplicated code with main
use main instead
* reduce large memory overhead in train-text-from-scratch
all gradients had to be pinned so that graph_reset works correctly.
this is no longer necessary with the changes to ggml_compute_backward introduced in this PR.
* add comment explaining why finetune checkpoints are allocated in one block
* make default value of float member a float literal
* handle rms_norm and rope parameters the same as in train-text-from-scratch
* remove unused code
* remove vocab related code as it is unnecessary
* add LLM_KV_TRAINING_TYPE to train-text-from-scratch checkpoints
so that they can be differentiated from lora finetune checkpoints
* add gguf constants and load/save functions from train-text-from-scratch
* add load & save lora finetune checkpoints via gguf
* add python script to convert old finetune checkpoint files to gguf
* remove old checkpoint save & load code
* remove code to print data checksums which was used to verify correctness of new gguf code
* omit tokenization when training is disabled, only save llama lora adapter
training can be disabled by passing '-n 0' to finetune
* remove trailing whitespace
* update README.md
* implement ggml_compute_forward_repeat_f16
* avoid stack overflow of large cgraphs in test-grad0
* add ggml API functions ggml_unravel_index, ggml_get_i32_nd and its analogs for set and for f32
ggml_get_i32_1d, ggml_set_i32_1d, ggml_get_f32_1d, ggml_set_f32_1d now support non-contiguous tensors.
in case of non-contiguous tensor, the 1d index is unraveled into a multi index using ggml_unravel_index to be passed to '_nd' function equivalent.
this fixes a bug in test-grad0 which happens due to ggml_build_backward not building purely contiguous tensors anymore
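the new accessors have roughly this shape (a sketch of the added declarations; the exact list and signatures are assumed):

    GGML_API void    ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i,
                                        int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
    GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
    GGML_API float   ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);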
* increase test-grad0 context mem size to accommodate for bigger cgraph
* add sanity check to ggml_compute_backward, asserting the correct shape of gradients
* fix ggml_acc_or_set to return tensor of correct shape
* remove unused 'inplace' argument from ggml_compute_backward function
inplace operations to add gradients are no longer created by ggml_compute_backward
use allocator to automatically make inplace operations
* add missing argument 'int i0' to ggml_get_i32_nd & ggml_set_i32_nd header declarations
* fix error message in ggml_allocr_alloc to display actual max_avail
* fix check_gradient
ggml_build_backward_expand was previously replaced by ggml_build_backward, but the assignment of the forward graph to the backward graph was missing
* use tensor->view_src instead of ggml_is_view and get_view_source
* move gradient checkpointing code into ggml, new API function:
// build gradient checkpointing backward graph gb for gf using provided checkpoints
// gb_tmp will contain original backward graph with rewritten backward process nodes,
// but without the second forward pass nodes.
GGML_API void ggml_build_backward_gradient_checkpointing(
struct ggml_context * ctx,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
struct ggml_cgraph * gb_tmp,
struct ggml_tensor * * checkpoints,
int n_checkpoints);
* replace custom data getters and setters by ggml functions
* train-text-from-scratch can train (full finetune) gguf models
just pass the gguf model via `--checkpoint-in FN`.
after this, to continue training, pass the generated checkpoint instead of the original gguf model.
tested with smaller models, bigger models may exceed available memory.
use (LORA) finetune for those.
* remove trailing whitespace
* add option to save train-text-from-scratch output every N iterations
* update README.md
* fix warnings
* fix warnings
* remove finetune option to disable allocator
the allocator should always be used.
by making sure that it is always used it gets easier to implement automatic memory requirements computation
* add tensor checkpoints only when gradient checkpointing is enabled
* initialize opt ggml context if none was provided
* add ggml-alloc API function 'ggml_allocr_max_size' to get max size of alloc
GGML_API size_t ggml_allocr_max_size(struct ggml_allocr * alloc);
* finetune: automatically allocate all memory and changes to command line options
remove '--n_examples N' parameter, as it no longer makes sense to call optimization process multiple times in a loop.
add '--only_write_lora' command line option: will skip tokenization and training, to only write a llama.cpp compatible LORA adapter.
remove memory buffer related command line options.
improve iteration console output.
* add finetune to Makefile
* update README.md
* print time per iteration and estimate remaining time
* increase measured alloc size by tensor_alignment
ggml_allocr_reset will reduce the given size by up to tensor_alignment-1
* fix README.md
* add some more allocator debug prints
* bug fix, probably solves the 'ggml_allocr_alloc: not enough space in the buffer' issue
* revert last commit
"bug fix, probably solves the 'ggml_allocr_alloc: not enough space in the buffer' issue"
"alloc was freeing an externally allocated tensor, because it calculated the end of allocator memory as alloc->data + alloc->max_size instead of alloc->data + alloc->size."
This is intentional to reduce the risk of freeing external tensors when measuring. Unless max_size is not properly calculated, I don't see why this is an issue.
* remove unnecessary "0x" before "%p" output
* move measurement memory segment to upper region of the address space
* update README.md
* fix printf format warnings
* add missing gguf_free in load_checkpoint_lora_file
* load default rms_norm and rope parameters from base model
* add gradient accumulation
specify number accumulation steps with '--grad-acc N'.
this will simulate a bigger batch size of grad_acc*batch.
* fix tracking of train_samples and train_tokens
* build : fix compile warnings
* ggml : fix L-BFGS linesearch loop
* improve finetune time measurement
fix printf warnings on system where int64_t is (long int).
change time datatypes to double because values get big with long training times.
exclude file saving from time measurement.
converge faster to actual time per iteration by removing very small first duration before first iteration was performed.
fix bug in output of total training time, the reported value was 1000 times too small.
* specify default lora rank with '--lora-r N'
'--lora-r N' will specify default rank for all tensors
'--rank-wq N', etc. will override this default rank for specific tensor types.
* fix gradient accumulation bug where the same batch was used for each microstep
* fix gradient accumulation bug where the same batch was used for each microstep
* support grouped-query-attention in ggml_flash_attn and ggml_flash_attn_back
k and v can now be repeated in q along ne[2]
in forward pass just use modulo to compute k and v indices, like ik2 = iq2 % nek2.
in the backward pass this won't work as easily, because multiple threads will compete to accumulate to the same k->grad[:,ik1,ik2,ik3] and v->grad[:,iv1,iv2,iv3].
so we change the parallelization over q rows to be over k rows. this ensures non-overlapping (ik2,ik3) across threads.
in each thread we then iterate over the number of repetitions of k/v in q to compute iq2 as iq2 = ik2 + irep*nek2.
since ne2 is not the same for q,k and v we also change how the gradients are concatenated into the result tensor.
additionally the offsets of gradq, gradk and gradv in the result tensor are now memory aligned.
we also simplify the compute_backward part of flash_attn to use ggml_reshape instead of switching over the number of dimensions.
this needs a small change to ggml_reshape, removing the assertion of second argument to be contiguous.
since only the shape (ne) of the second reshape argument is of relevance, its memory layout (nb) is irrelevant -> it can very well be non-contiguous.
change test-grad0 to also test for repeated k/v in q.
this changes the rng and now results in small gradient differences in softmax. these solely come from using f16 exp table lookup in forward softmax: when temporarily changing softmax to use actual exp function, the reported gradient differences go away. gradient differences coming solely from f16 table lookup are acceptable.
added a note to explain this.
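A tiny self-contained example of the head-index mapping described above (the head counts are made-up example values):

    #include <stdio.h>

    int main(void) {
        const int nek2 = 2;         // number of k/v heads (example value)
        const int neq2 = 8;         // number of q heads   (example value)
        const int nrep = neq2/nek2; // how often k/v are repeated in q

        // forward: each q head iq2 reads k/v head iq2 % nek2
        for (int iq2 = 0; iq2 < neq2; ++iq2) {
            printf("q head %d -> k/v head %d\n", iq2, iq2 % nek2);
        }

        // backward: parallelize over k/v heads; each one gathers gradients
        // from the q heads iq2 = ik2 + irep*nek2 that used it
        for (int ik2 = 0; ik2 < nek2; ++ik2) {
            for (int irep = 0; irep < nrep; ++irep) {
                printf("k/v head %d <- q head %d\n", ik2, ik2 + irep*nek2);
            }
        }
        return 0;
    }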
* add llama API functions to get grouped-query-attention n_head parameter 'n_head_kv'.
* fix finetune to support grouped-query-attention (using flash-attention)
note: ggml changes to ggml_out_prod are necessary to support grouped-query-attention without flash-attention.
* support broadcastable a in out_prod(a, b) and backward pass of broadcasting mul_mat(a, b)
* test broadcasting mul_mat backward pass
* decouple random number generator of each operation test
when changing one test the rng of the other tests is not influenced anymore
* add comment briefly describing what ggml_repeat_back does
* simplify broadcasting mul_mat backward using ggml_repeat_back
* add cgraph evaluation order member and corresponding enum type
this controls in which order ggml_build_forward visits source nodes.
by default the nodes are visited left to right, i.e. src[0] first.
in some cases it is beneficial for ggml-alloc to visit in a different order.
two possible orders are supported: left-to-right (src[0] first) and right-to-left (src[0] last).
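A sketch of what the enum and the new cgraph member could look like (names assumed from the description above):

    enum ggml_cgraph_eval_order {
        GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0, // visit src[0] first (default)
        GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,     // visit src[0] last
        GGML_CGRAPH_EVAL_ORDER_COUNT,
    };

    // struct ggml_cgraph gains a member:
    //     enum ggml_cgraph_eval_order order;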
* measure max compute size for each cgraph eval order and use best order
this can bring huge memory savings:
e.g. codellama-34b with n_ctx=64, n_batch=1 goes from 92927.8 MB down to 4627.6 MB
* remove unused command line options
* add sample start patterns and options to force new or by default resume last shuffling
* update shuffle rng state on reshuffle
* exclude known zero values from computations in flash_attn_f32 & flash_attn_back_f32
* remove probably unnecessary exception type flags from stringstream
* pass correct max number of tokens to llama_tokenize
* account for possible leading whitespace that will be added by tokenizer
e.g. '\t' will be tokenized by llama spm tokenizer to [29871, 12]
* use unrolled vec_mad in out_prod
y is vec_mad result vec.
x is vec_mad input vec.
v is vec_mad input scalar.
ggml_vec_mad_f32_unroll will internally loop over x and v with same y.
GGML_VEC_MAD_UNROLL is by default defined to 32.
This value was empirically optimized using performance test runs of out-prod in openllama-3b finetune with 256 context length and batch size 1. It gives a 23% performance boost for out_prod.
Full measurements of out-prod runtime in ms:
unroll_xv unroll_yv
1 67014.643 87826.469
2 77117.552 89077.656
4 72091.311 109121.657
8 61077.543 88678.334
16 56914.67 79514.947
24 59024.595 84350.254
28 55952.446 83368.73
32 51476.658 85177.745
36 55973.792 84659.92
40 55139.616 93844.738
48 60736.392 93330.267
64 99856.878 116994.99
The second timing column is for unrolling yv instead of xv.
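A simplified scalar sketch of the unrolled vec_mad (the real version additionally uses SIMD helpers; xs and vs are the byte strides between the unrolled x vectors and v scalars):

    #include <stddef.h>

    #define GGML_VEC_MAD_UNROLL 32

    // y[i] += sum_k x_k[i]*v_k, reusing the same y for all unrolled inputs
    static void vec_mad_f32_unroll_sketch(const int n, const size_t xs, const size_t vs,
                                          float * y, const float * xv, const float * vv) {
        for (int i = 0; i < n; ++i) {
            float sum = y[i];
            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
                const float * x =  (const float *) ((const char *) xv + k*xs); // k-th input vector
                const float   v = *(const float *) ((const char *) vv + k*vs); // k-th input scalar
                sum += x[i]*v;
            }
            y[i] = sum;
        }
    }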
* set lora_alpha to value of lora_r if it is not set via command line
otherwise only changing lora_r will change scaling of lora adapter used in prediction
* reshuffle original sample order instead of the previous shuffled order
otherwise resumed reshuffle will not result in same sample order
* block tiling for out-prod inspired by mul-mat
block sizes are empirically optimized
roughly doubles the flops of out-prod
* exclude some more known zero values from computations in flash_attn_f32 & flash_attn_back_f32
* add static keywords
* remove outcommented old code
* update train-text-from-scratch with tokenization, sample selection and shuffling from finetune
* remove lbfgs related train parameters
* move common train functions into common/train.[h|cpp]
* move train state into struct train_state
* move train data saving code into callback to unify code of opt_callback
train_params are still different in finetune and train-text-from-scratch, so it can't yet be moved to train.h|cpp
* move common train params into common/train
* move common opt_callback into common/train
* fix consume_common_train_arg
* save and load head_count_kv in lora checkpoints
* increase train_samples by used_samples instead of number of batches
one batch can contain more than one sample when option "fill_with_next_samples" is used
* fix usage of llama_tokenize
* remove static from process_escape since we need it exposed in header
* fix code formatting of long function declarations
* fix condition in load_train_state_gguf
* use die("msg") instead of replace GGML_ASSERT(!"msg") or throw std::runtime_error("msg")
* fix saving and loading of training type
* remove terminating '\0' from tokenization
(llama_tokenize is now passed the string length instead of relying on terminating '\0')
* fix compile warnings
* fix compile warnings
* use new/delete for train_state instead of malloc/free
using malloc may result in seg faults when trying to assign string fields
* assert that sample_count > 0, avoiding division by zero
* fix frand to return value in interval [0,1)
* add train option "--sample-random-offsets"
Use samples beginning at random offsets.
The offset is only applied to the first sample in each batch context window.
Together with "--fill-with-next-samples" this may help for training endless text generation.
For example given a dataset containing samples "abcd", "ABCD", "0123".
With context size of 8 and options "--fill-with-next-samples", "--no-separate-with-eos", "--no-separate-with-bos",
the context windows of batches could only be filled with "abcdABCD", "ABCDabcd", "0123abcd", etc.
With "--sample-random-offsets" it can also be filled with "23abcdAB", "bcd0123A", etc.
* deduplicate code into function
* remove n_rot hparam, as it must always be hparam.n_embd_head()
* align code
* assert correct base model tensor shapes
* move some params from lora hparams into model hparams and load model params from gguf
this equalizes the model definition in finetune and text-from-scratch and removes the need for additional llama api functions to get model parameters
* remove now unnecessary llama API functions to get model params that were added by this PR
* train-text-from-scratch: automatically allocate model tensors, remove option '--mem-model N'
* train-text-from-scratch: automatically allocate opt context
* train-text-from-scratch: automatically allocate input tensors
* train-text-from-scratch: automatically allocate compute memory
* remove unused options and equalize train-text-from-scratch with finetune
* initialize opt->loss_after with zero
* add export-lora program
* remove trailing whitespace
* add export-lora build in Makefile
* remove unused struct tensor_info from export-lora
* add export-lora build dependency to llama
because it depends on common, which depends on llama
* update finetune README.md
* cancel optimization when specified number of epochs is completed
* improve handling of export-lora arguments
print errors and warnings when files could not be read or created
* Fix export-lora.cpp "not enough space in the context's memory pool" (#1)
* Fix export-lora.cpp "not enough space in the context's memory pool"
Without this patch, export-lora would sometimes error with "not enough space in the context's memory pool (needed 656784, available 656800)".
* increase required context size by 5*GGML_MEM_ALIGN instead of plain 16
---------
Co-authored-by: xaedes <xaedes@gmail.com>
* improve handling of not yet supported tensor types
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: meatbag-18a <145869052+meatbag-18a@users.noreply.github.com>
2023-09-28 20:40:11 +02:00
// Get a llama model tensor
LLAMA_API struct ggml_tensor * llama_get_model_tensor ( struct llama_model * model , const char * name ) ;
2024-07-04 15:46:11 +02:00
// Returns true if the model contains an encoder that requires llama_encode() call
LLAMA_API bool llama_model_has_encoder ( const struct llama_model * model ) ;
2024-08-10 11:43:26 +02:00
// Returns true if the model contains a decoder that requires llama_decode() call
LLAMA_API bool llama_model_has_decoder ( const struct llama_model * model ) ;
2024-07-04 15:46:11 +02:00
// For encoder-decoder models, this function returns id of the token that must be provided
// to the decoder to start generating output sequence. For other models, it returns -1.
LLAMA_API llama_token llama_model_decoder_start_token ( const struct llama_model * model ) ;
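    //
    // Example (illustrative sketch):
    //
    //     llama_token dec_start = llama_model_decoder_start_token(model);
    //     if (dec_start == -1) {
    //         dec_start = llama_token_bos(model); // no dedicated decoder start token
    //     }
    //     // feed dec_start as the first token of the decoder sequence via llama_decode()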
2024-08-21 23:58:11 +02:00
// Returns true if the model is recurrent (like Mamba, RWKV, etc.)
LLAMA_API bool llama_model_is_recurrent ( const struct llama_model * model ) ;
2023-03-22 06:32:36 +01:00
// Returns 0 on success
2024-01-02 15:15:16 +01:00
LLAMA_API uint32_t llama_model_quantize (
2023-03-22 06:32:36 +01:00
const char * fname_inp ,
const char * fname_out ,
2023-06-10 09:59:17 +02:00
const llama_model_quantize_params * params ) ;
2023-03-22 06:32:36 +01:00
2024-07-15 20:50:47 +02:00
// Load a LoRA adapter from file
// The loaded adapter will be associated to the given model, and will be free when the model is deleted
LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init (
struct llama_model * model ,
const char * path_lora ) ;
// Add a loaded LoRA adapter to given context
// This will not modify model's weight
LLAMA_API int32_t llama_lora_adapter_set (
struct llama_context * ctx ,
struct llama_lora_adapter * adapter ,
float scale ) ;
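    //
    // Example (illustrative sketch; the adapter path is a placeholder):
    //
    //     struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");
    //     if (adapter != NULL) {
    //         llama_lora_adapter_set(ctx, adapter, 1.0f); // apply to ctx with scale 1.0
    //     }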
2024-07-24 11:25:19 +02:00
// Remove a specific LoRA adapter from given context
2024-07-15 20:50:47 +02:00
// Return -1 if the adapter is not present in the context
LLAMA_API int32_t llama_lora_adapter_remove (
struct llama_context * ctx ,
struct llama_lora_adapter * adapter ) ;
2024-07-24 11:25:19 +02:00
// Remove all LoRA adapters from given context
LLAMA_API void llama_lora_adapter_clear (
struct llama_context * ctx ) ;
2024-07-15 20:50:47 +02:00
// Manually free a LoRA adapter
// Note: loaded adapters will be free when the associated model is deleted
LLAMA_API void llama_lora_adapter_free ( struct llama_lora_adapter * adapter ) ;
2024-03-15 21:43:02 +01:00
// Apply a loaded control vector to a llama_context, or if data is NULL, clear
// the currently loaded vector.
// n_embd should be the size of a single layer's control, and data should point
// to an n_embd x n_layers buffer starting from layer 1.
// il_start and il_end are the layer range the vector should apply to (both inclusive)
// See llama_control_vector_load in common to load a control vector.
LLAMA_API int32_t llama_control_vector_apply (
struct llama_context * lctx ,
const float * data ,
size_t len ,
int32_t n_embd ,
int32_t il_start ,
int32_t il_end ) ;
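    //
    // Example (illustrative sketch; data, n_embd and n_layers as described above):
    //
    //     // apply the control vector to layers [1, n_layers]
    //     llama_control_vector_apply(lctx, data, (size_t) n_embd*n_layers, n_embd, 1, n_layers);
    //     // later: clear it again
    //     llama_control_vector_apply(lctx, NULL, 0, n_embd, 0, 0);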
2023-04-17 17:28:55 +02:00
2023-09-28 18:04:36 +02:00
//
// KV cache
//
2023-11-23 18:07:56 +01:00
// Information associated with an individual cell in the KV cache view.
struct llama_kv_cache_view_cell {
// The position for this cell. Takes KV cache shifts into account.
// May be negative if the cell is not populated.
llama_pos pos ;
} ;
// An updateable view of the KV cache.
struct llama_kv_cache_view {
// Number of KV cache cells. This will be the same as the context size.
int32_t n_cells ;
// Maximum number of sequences that can exist in a cell. It's not an error
// if there are more sequences in a cell than this value, however they will
// not be visible in the view cells_sequences.
2024-03-11 16:49:47 +01:00
int32_t n_seq_max ;
2023-11-23 18:07:56 +01:00
// Number of tokens in the cache. For example, if there are two populated
// cells, the first with 1 sequence id in it and the second with 2 sequence
// ids then you'll have 3 tokens.
int32_t token_count ;
// Number of populated cache cells.
int32_t used_cells ;
// Maximum contiguous empty slots in the cache.
int32_t max_contiguous ;
// Index to the start of the max_contiguous slot range. Can be negative
// when cache is full.
int32_t max_contiguous_idx ;
// Information for an individual cell.
struct llama_kv_cache_view_cell * cells ;
2024-03-11 16:49:47 +01:00
// The sequences for each cell. There will be n_seq_max items per cell.
2023-11-23 18:07:56 +01:00
llama_seq_id * cells_sequences ;
} ;
// Create an empty KV cache view. (use only for debugging purposes)
2024-03-11 16:49:47 +01:00
LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init ( const struct llama_context * ctx , int32_t n_seq_max ) ;
2023-11-23 18:07:56 +01:00
// Free a KV cache view. (use only for debugging purposes)
LLAMA_API void llama_kv_cache_view_free ( struct llama_kv_cache_view * view ) ;
// Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
LLAMA_API void llama_kv_cache_view_update ( const struct llama_context * ctx , struct llama_kv_cache_view * view ) ;
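    //
    // Example (debugging sketch):
    //
    //     struct llama_kv_cache_view view = llama_kv_cache_view_init(ctx, 1);
    //     llama_kv_cache_view_update(ctx, &view);
    //     for (int32_t i = 0; i < view.n_cells; i++) {
    //         if (view.cells[i].pos >= 0) {
    //             // cell i is populated; its sequence ids start at
    //             // view.cells_sequences[i*view.n_seq_max]
    //         }
    //     }
    //     llama_kv_cache_view_free(&view);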
// Returns the number of tokens in the KV cache (slow, use only for debug)
// If a KV cell has multiple sequences assigned to it, it will be counted multiple times
2024-01-02 15:15:16 +01:00
LLAMA_API int32_t llama_get_kv_cache_token_count ( const struct llama_context * ctx ) ;
2023-11-23 18:07:56 +01:00
// Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
2024-01-02 15:15:16 +01:00
LLAMA_API int32_t llama_get_kv_cache_used_cells ( const struct llama_context * ctx ) ;
2023-04-02 12:23:04 +02:00
ggml : add Flash Attention (#5021)
* ggml : add ggml_flash_attn_ext API
* ggml : fix GQA support in ggml_flash_attn_ext
* ggml : online attention (CPU)
* metal : initial implementation
* metal : f16 precision
* metal : reduce branches
* metal : specialize for head size
* wip : 8 rows per simd group
* wip : 4 rows per simd group
* wip : template for rows per warp
* metal : parallelize across KV size
* metal : parallel reduce across heads
* metal : efficient flash_attn_f16 implementation
* metal : avoid redundant loads of the attention
* metal : scale and mask in matrix form
* metal : fix comment
* llama : avoid ggml_cast, use F32 query
* metal : add parallel reduce version (disabled)
* metal : move output into local memory + optimize
- the result from each simdgroup now stays in the registers
- significantly reduced SRAM usage
- more efficient skipping of -INF blocks
- avoid simdgroup barrier in hot loop
- add comments
* metal : add tests, fix scaling, support C > 32
* metal : improve precision
* ggml : fix f16 mad
* metal : minor
* metal : support Q > 8
* tests : add ATTN tests
* metal : disable buffer allocation logs
* tests : more
* metal : faster inner loop for C == 32
* metal : fix array initialization
* tests : ifdef
* ggml : switch to padded F16 mask for ggml_soft_max, ggml_flash_attn_ext
* ggml : fix ggml_soft_max mask requirement
* cuda : fix soft_max to use correct mask size
* cuda : add flash_attn kernel (wip)
* metal : optimize softmax for C > 32
* metal : optimize softmax
* tests : minor fix
* cuda : avoid zeroing fragments
* tests : update dims
* cuda : fix __hisinf() result check
* cuda : avoid warp_reduce for smax
* cuda : use int instead of int64_t
Noticeably improves performance (thanks to Johannes)
* cuda : make loops use the same loop values
Thanks Johannes again for the tip
* cuda : unroll some of the loops
* cuda : avoid __hisinf branches
* cuda : use half2 in softmax
* cuda : switch to 1 warp for bs > 16
* cuda : speed-up reduce part of the kernel
* cuda : unroll Q*K^T loop
* cuda : fix -INF block check
* cuda : simplify softmax
* cuda : fix matrix names
* cuda : minor
* llama : adapt to F16 KQ_pos
* llama : adapt new models to F16 KQ_mask
* ggml : fix F16 store (ARM NEON)
* llama : fix type of KQ_mask and KQ_pos
* ggml : fix CPU soft_max
* tests : add hs=256
* cuda : fix build
* metal : improve perf via smaller int registers
* cuda : adapt soft_max to F16 mask and pos
* CUDA: faster FlashAttention, kernel for bs == 1
* 16 cols for Phi-2
* no vec for hs, no hs==256 ncols==32 for Volta
* adjust kernel selection logic
* 4 warps, 256 stride for all D
* no ncols == 64
* Multiple parallel blocks for batch size 1
* fix compile warnings
* fix excessive KQ_b loads
* fix cmake build
* fix KV cache padding, NaN from INFINITY (#6438)
* llama : flash_attn cparam + fix defrag
* server: support flash_attn param
* server: bench: enable flash_attn param
* CUDA: refactor host code, dyn. par. blocks
* fix flash_attn_vec_f16 race condition
* flush softmax exp below threshold to 0
* store temp KQ in registers
* Calculate KQ as FP32 if KQV has GGML_PREC_F32
* Add __hgt2_mask implementation for CUDA 11
* fix KQ FP32 precision for parallel_blocks > 1
* llama-bench : add -fa,--flash-attn arg
* metal : add BS=1 kernel for flash attention (#6508)
* metal : add BS=1 kernel for flash attention (wip)
* metal : support more than 1 warps
* metal : opts
* metal : opt
* metal : switch to parallel reduce
* metal : reduce registers
* metal : simplify
* metal : initial FA vec kernel
* metal : use F32 attention accumulators
* batched-bench : add fattn arg
* llama : simplify llama_build_kv_store
ggml-ci
* llama : adapt build_olmo to changes
* ggml : fix arm fp16 store on windows
* metal : clean-up
* metal : clean-up kernel code
* metal : minor
* tests : remove benchmarks
ggml-ci
* ggml : fix avx512 const correctness
ggml-ci
* ggml : fix soft_max with bias on CPU
ggml-ci
* common : print --flash-attn in help
* ggml : fix num dimensions in ggml_flash_attn_ext
* llama : force disable flash attention for incompatible models
* ggml : ggml_soft_max support F16/F32 mask/pos
ggml-ci
* cuda : uint -> uint32_t
* cuda : "constexpr dim3" -> "const dim3"
ggml-ci
* cuda : try to fix __hgt2_mask
ggml-ci
* ggml : add TODO's for F16/F32 mask/pos support in other backends
* llama : replace bool need_kq_pos with use_alibi
* llama : prep ALiBi support for BERT models
ggml-ci
* llama : fix n_batch requirements
ggml-ci
* cont
* server : add help for --flash-attn arg
* llama : disable FA for AMD
* tests : remove TMP_ATTN_BENCH
ggml-ci
* llama : support save/load state with FA enabled
ggml-ci
* ci : add CUDA save-load-state tests
ggml-ci
* llama : llama_kv_cache_clear zeroes data + fix save-load seq
ggml-ci
* llama : fix copy-paste errors, add TODO
* llama : disallow incompatible states
* llama : update llama_state_get_size after v_trans field
* metal : remove tmp log
* llama : add static reminder for llama_state_get_size
* metal : fix max nsg
ggml-ci
* ci : fix arg order
ggml-ci
---------
Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
Co-authored-by: Pierrick HYMBERT <pierrick.hymbert@gmail.com>
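For reference, the new ggml_flash_attn_ext API has roughly this shape (a sketch; the exact parameter list is an assumption here):

    GGML_API struct ggml_tensor * ggml_flash_attn_ext(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * mask, // padded F16 mask, see notes above
            float                 scale);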
2024-04-30 11:16:08 +02:00
// Clear the KV cache - both cell info is erased and KV data is zeroed
2023-10-29 18:31:40 +01:00
LLAMA_API void llama_kv_cache_clear (
struct llama_context * ctx ) ;
2023-09-28 18:04:36 +02:00
// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
2024-04-08 14:43:30 +02:00
// Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
2023-10-29 18:31:40 +01:00
// seq_id < 0 : match any sequence
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
llama : support Mamba Selective State Space Models (#5328)
* mamba : begin working on support for Mamba SSM
* mamba : begin figuring out how to (ab)use the kv cache for Mamba
* mamba : recurrent inference almost works, but incoherent
* mamba : recurrent inference WORKS!!!
* convert : optionally use d_conv and d_state from config.json for Mamba
* mamba : refactor recurrent conv, resulting in 20% perf increase
It's still slower than I'd like, but I did not really optimize `ggml_exp` yet.
I also refactored `ggml_exp` to work with tensors with more than 2 dimensions.
* ggml : parallelize ggml_exp
This results in 8% faster token generation for Mamba-130M.
* mamba : simplify the conv step with a self-overlapping view
Turns out the conv_state can be made smaller by one column.
Note that this breaks existing GGUFs of Mamba,
because the key_value_length field is tied to the conv_state size.
Convolution with a self-overlapping view is cool!
And it's much simpler than what I initially thought would be necessary
to make the convolution step work with more than 1 token at a time.
Next step is to make the SSM step work on batches of tokens too,
and thus I need to figure out a way to make a parallel selective scan
which will keep the ssm_state small and won't make it bigger
by a factor of (n_layer * batch_size).
* llama : fix Mamba KV self size wrongly displaying as f16 instead of f32
Relatedly, I also tried to see if other types than f32 worked for the states,
but they don't, because of the operators used.
It's probably better anyway to keep lots of precision there,
since the states are small anyway.
* mamba : fix self-overlapping view depth stride
* mamba : handle batches of more than 1 token
This means running Mamba no longer crashes when using the default settings!
And probably also slightly faster prompt processing.
Both batched and non-batched processing yield the same output.
Previously, the state was not cleared when starting a sequence.
Next step is to make the KV cache API work as expected for Mamba models.
* ggml: add ggml_ssm_scan to help with parallel selective scan
If the selective scan was implemented without a custom operator,
there would be waaay too many nodes in the graph. For example,
for Mamba-130M, with a batch size of 512 (the default),
a naive selective scan could add at least 24*512=12288 nodes,
which is more than LLAMA_MAX_NODES (8192),
and that's only for the smallest Mamba model.
So it's much cleaner with a custom operator.
Not sure about the name, though.
* ggml : in ggml_ssm_scan, merge multiple rows in the same vec operation
This will help with performance on CPU if ggml_vec_mul_f32
and ggml_vec_add_f32 are ever optimized with SIMD.
* mamba : very basic quantization support
Mostly works, but there is currently no difference
between the variants of a k-quant (e.g. Q4_K_S and Q4_K_M are the same).
Most of the SSM-specific weights can be kept in f32 without affecting
the size that much, since they are relatively small.
(the linear projection weights are responsible for most of Mamba's size)
Too much quantization seems to make the state degrade quite fast, and
the model begins to output gibberish.
It seems to affect bigger models to a lesser extent than small models,
but I'm not sure by how much.
Experimentation will be needed to figure out which weights are more important
for the _M (and _L?) variants of k-quants for Mamba.
* convert : fix wrong name for layer norm weight of official Mamba models
I was using Q-bert/Mamba-* models before, which have a slightly different
naming scheme for the weights.
(they start with "model.layers" instead of "backbone.layers")
* mamba : fuse more steps of the SSM scan in the ggml_ssm_scan operator
This increases performance on CPU by around 30% for prompt processing,
and by around 20% for text generation.
However, it also makes the ggml_exp and ggml_soft_plus operators unused.
Whether or not they should be kept will be decided later.
* convert : for Mamba, also consider the "MambaLMHeadModel" arch name
It's the name of the class of the official implementation,
though they don't use it (yet) in the "architectures" field of config.json
* mamba : fix vocab size problems with official models
The perplexity was waaaay too high for models with a non-round vocab size.
Not sure why, but it needed to be fixed in the metadata.
Note that this breaks existing GGUF-converted Mamba models,
but **only if** the vocab size was not already rounded.
* ggml : remove ggml_exp and ggml_soft_plus
They did not exist anyway outside of this branch,
and since ggml_ssm_scan fused operations together, they are unused.
It's always possible to bring them back if needed.
* mamba : remove some useless comments
No code change.
* convert : fix flake8 linter errors
* mamba : apply suggestions from code review
* mamba : remove unnecessary branch for row-wise ssm_state and C multiplication
It was previously done to avoid permuting when only one token is processed
at a time (like when generating text), but permuting is cheap,
and dynamically changing the compute graph is not future-proof.
* ggml : in ggml_ssm_scan, use more appropriate asserts
* ggml : rename the destination pointer in ggml_compute_forward_ssm_scan_f32
* mamba : multiple sequences, but one at a time
This is a step towards making this Mamba implementation usable
with the server example (the way the system prompt is kept when clearing
the client slots will need to be changed before this can work, though).
The KV cache size for this kind of model is tied to the maximum number
of sequences kept at any single time.
For now, this number is obtained from n_parallel (plus one,
to have an extra sequence to dedicate to the system prompt),
but there might be a better way to do this which won't also
make the main example use 2 cells even if only 1 is really used.
(for this specific case, --parallel 0 helps)
Simultaneous sequence processing will probably require changes to
ggml_ssm_scan, and possibly a new operator for the conv step.
* mamba : support llama_kv_cache_seq_cp
This (mis)uses the logic around K shifts, because tokens in a state
can't be shifted anyway, and because inp_K_shift has the right shape and type.
Using ggml_get_rows is a nice way to do copies, but copy chains can't work.
Fortunately, copy chains don't really seem to be used in the examples.
Each KV cell is dedicated to the sequence ID corresponding to its own index.
* mamba : use a state mask
It's cleaner than the previous heuristic of
checking for the pos of the first token in the batch.
inp_KQ_mask could not be re-used for this, because it has the wrong shape
and because it seems more suited to the next step of
simultaneous sequence processing (helping with the problem of
remembering which token belongs to which sequence(s)/state(s)).
* llama : replace the usage of n_ctx with kv_self.size in many places
* mamba : use n_tokens directly instead of n_tok
* mamba : in comments, properly refer to KV cells instead of slots
* mamba : reduce memory usage of ggml_ssm_scan
From 290.37 MiB to 140.68 MiB of CPU compute buffer size
with Mamba 3B with a batch size of 512.
The result tensor of ggml_ssm_scan was previously a big part
of the CPU compute buffer size. To make it smaller,
it does not contain the intermediate ssm states anymore.
Both y and the last ssm state are combined in the result tensor,
because it seems only a single tensor can be returned by an operator
with the way the graph is built.
* mamba : simultaneous sequence processing
A batch can now contain tokens from multiple sequences.
This is necessary for at least the parallel example, the server example,
and the HellaSwag test in the perplexity example.
However, for this to be useful, uses of llama_kv_cache_seq_rm/cp
will need to be changed to work on whole sequences.
* ggml : add ggml_ssm_conv as a new operator for the conv step of Mamba
This operator makes it possible to use and update the correct states
for each token of the batch in the same way as ggml_ssm_scan.
Other solutions which use existing operators would need loops which would
add too many nodes to the graph (at least the ones I thought of).
Using this operator further reduces the size of the CPU compute buffer
from 140.68 MiB to 103.20 MiB with Mamba 3B with a batch size of 512.
And (at least on CPU), it's a bit faster than before.
Note that "ggml_ssm_conv" is probably not the most appropriate name,
and it could be changed if a better one is found.
* llama : add inp_s_seq as a new input tensor
The most convenient implementation to select the correct state (for Mamba)
for each token is to directly get the correct index from a tensor.
This is why inp_s_seq is storing int32_t and not floats.
The other, less convenient way to select the correct state would be
to have inp_KQ_mask contain 1.0f for each state used by a token
and 0.0f otherwise. This complicates quickly fetching the first used
state of a token, and is also less efficient because a whole row
of the mask would always need to be read for each token.
Using indexes makes it easy to stop searching when there are
no more sequences for a token, and the first sequence assigned
is always very quickly available (it's the first element of each row).
* mamba : support llama_kv_cache_seq_cp copy chains
* mamba : support shifting and dividing the kv cache pos
* mamba : make the server and parallel examples work with whole sequences
A seq_id is dedicated to the system prompt in both cases.
* llama : make llama_kv_cache_seq_rm return whether it succeeded or not
* mamba : dedicate an input tensor for state copy indices
This is cleaner and makes it easier to adapt when/if token positions
(and by extension, inp_K_shift) are no longer integers.
* mamba : adapt perplexity, batched, and batched-bench examples
* perplexity : limit the max number of sequences
This adapts to what the loaded model can provide.
* llama : add llama_n_max_seq to get the upper limit for seq_ids
Used by the perplexity example.
* batched : pass n_parallel to the model's context params
This should have been there already, but it wasn't.
* batched-bench : reserve sequences to support Mamba
* batched-bench : fix tokens being put in wrong sequences
Generation quality isn't what's measured in there anyway,
but at least using the correct sequences avoids using non-consecutive
token positions.
* mamba : stop abusing attention metadata
This breaks existing converted-to-GGUF Mamba models,
but will allow supporting mixed architectures like MambaFormer
without needing to break Mamba models.
This will also allow changing the size of Mamba's states
without having to reconvert models in the future.
(e.g. using something else than d_conv - 1 columns for the conv_states
will not require breaking existing converted Mamba models again)
* gguf-py : add new KV metadata key-value pairs for Mamba
* llama : add new metadata key-value pairs for Mamba
* llama : guard against divisions by zero when n_head is 0
* mamba : rename "unlimited" KV cache property to "recurrent"
* mamba : more correctly update the "used" field of the KV cache
* ggml : in ggml_ssm_scan, use a threshold for soft_plus
This is how the official Mamba implementation does it,
and it's also what torch.nn.Softplus does.
* convert : for Mamba, fallback to internal NeoX tokenizer
The resulting models are exactly the same
as if the tokenizer.json and tokenizer_config.json of GPT-NeoX were there.
* mamba : support state saving and restoring
* ggml : implicitly pass src tensors through dst for Mamba-related ops
* mamba : clarify some comments
* server : fix cache_tokens not getting correctly resized
Otherwise, when the "we have to evaluate at least 1 token" special case
was triggered, an extra token was kept in cache_tokens even if it was
removed from the KV cache.
For Mamba, this caused useless prompt reprocessing when the previous
request triggered the above case.
* convert-hf : support new metadata keys for Mamba
For the models available at
https://huggingface.co/collections/state-spaces/transformers-compatible-mamba-65e7b40ab87e5297e45ae406
* mamba : rename metadata to be more similar to transformers library
This breaks existing converted-to-GGUF models,
but the metadata names are more "standard".
* mamba : support mamba-*-hf models
These models share their token_embd.weight with their output.weight
* mamba : add missing spaces
This is purely a formatting change.
* convert-hf : omit output.weight when identical with token_embd.weight
Only for Mamba for now, but it might be relevant for other models eventually.
Most Mamba models actually share these two tensors, albeit implicitly.
* readme : add Mamba to supported models, and add recent API changes
* mamba : move state_seq and state_mask views outside layer loop
A few tensors were also missing `struct` in front of `ggml_tensor`.
2024-03-08 23:31:00 +01:00
LLAMA_API bool llama_kv_cache_seq_rm (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
llama_seq_id seq_id ,
llama_pos p0 ,
llama_pos p1 ) ;
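    //
    // Example (illustrative sketch; n_keep is a placeholder):
    //
    //     llama_kv_cache_seq_rm(ctx, 1, -1, -1);     // remove the whole sequence 1
    //     llama_kv_cache_seq_rm(ctx, 0, n_keep, -1); // remove positions [n_keep, inf) of sequence 0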
// Copy all tokens that belong to the specified sequence to another sequence
// Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
2023-10-03 20:04:01 +02:00
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
2023-09-28 18:04:36 +02:00
LLAMA_API void llama_kv_cache_seq_cp (
struct llama_context * ctx ,
llama_seq_id seq_id_src ,
llama_seq_id seq_id_dst ,
llama_pos p0 ,
llama_pos p1 ) ;
// Removes all tokens that do not belong to the specified sequence
LLAMA_API void llama_kv_cache_seq_keep (
struct llama_context * ctx ,
llama_seq_id seq_id ) ;
// Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
2024-02-25 21:12:24 +01:00
// If the KV cache is RoPEd, the KV data is updated accordingly:
// - lazily on next llama_decode()
// - explicitly with llama_kv_cache_update()
2023-10-03 20:04:01 +02:00
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
2024-02-25 21:12:24 +01:00
LLAMA_API void llama_kv_cache_seq_add (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
llama_seq_id seq_id ,
llama_pos p0 ,
llama_pos p1 ,
llama_pos delta ) ;
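    //
    // Example (illustrative sketch): shift sequence 0 left by n_discard positions while
    // keeping the first n_keep positions (n_keep and n_discard are placeholders):
    //
    //     llama_kv_cache_seq_rm (ctx, 0, n_keep            , n_keep + n_discard);
    //     llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, -1, -n_discard);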
2024-01-08 10:18:32 +01:00
// Integer division of the positions by factor of `d > 1`
2024-02-25 21:12:24 +01:00
// If the KV cache is RoPEd, the KV data is updated accordingly:
// - lazily on next llama_decode()
// - explicitly with llama_kv_cache_update()
2024-01-08 10:18:32 +01:00
// p0 < 0 : [0, p1]
// p1 < 0 : [p0, inf)
2024-01-08 10:14:04 +01:00
LLAMA_API void llama_kv_cache_seq_div (
struct llama_context * ctx ,
llama_seq_id seq_id ,
llama_pos p0 ,
llama_pos p1 ,
int d ) ;
2024-02-25 21:12:24 +01:00
// Returns the largest position present in the KV cache for the specified sequence
LLAMA_API llama_pos llama_kv_cache_seq_pos_max (
struct llama_context * ctx ,
llama_seq_id seq_id ) ;
// Defragment the KV cache
// This will be applied:
// - lazily on next llama_decode()
// - explicitly with llama_kv_cache_update()
LLAMA_API void llama_kv_cache_defrag ( struct llama_context * ctx ) ;
// Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
LLAMA_API void llama_kv_cache_update ( struct llama_context * ctx ) ;
2024-11-19 12:29:26 +01:00
// Check if the context supports KV cache shifting
LLAMA_API bool llama_kv_cache_can_shift ( struct llama_context * ctx ) ;
2023-09-28 18:04:36 +02:00
//
// State / sessions
//
2023-04-26 22:08:43 +02:00
2024-07-28 06:42:05 +02:00
// Returns the *actual* size in bytes of the state
2024-09-07 14:16:19 +02:00
// (logits, embedding and kv_cache)
2024-07-28 06:42:05 +02:00
// Only use when saving the state, not when restoring it, otherwise the size may be too small.
LLAMA_API size_t llama_state_get_size ( struct llama_context * ctx ) ;
LLAMA_API DEPRECATED ( size_t llama_get_state_size ( struct llama_context * ctx ) ,
2024-04-08 14:43:30 +02:00
" use llama_state_get_size instead " ) ;
2023-04-22 08:21:32 +02:00
// Copies the state to the specified destination address.
// Destination needs to have allocated enough memory.
// Returns the number of bytes copied
2024-04-08 14:43:30 +02:00
LLAMA_API size_t llama_state_get_data (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
2024-07-28 06:42:05 +02:00
uint8_t * dst ,
size_t size ) ;
2024-04-08 14:43:30 +02:00
LLAMA_API DEPRECATED ( size_t llama_copy_state_data (
struct llama_context * ctx ,
uint8_t * dst ) ,
" use llama_state_get_data instead " ) ;
2023-04-22 08:21:32 +02:00
// Set the state reading from the specified address
// Returns the number of bytes read
2024-04-08 14:43:30 +02:00
LLAMA_API size_t llama_state_set_data (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
2024-07-28 06:42:05 +02:00
const uint8_t * src ,
size_t size ) ;
2024-04-08 14:43:30 +02:00
LLAMA_API DEPRECATED ( size_t llama_set_state_data (
struct llama_context * ctx ,
const uint8_t * src ) ,
" use llama_state_set_data instead " ) ;
2023-04-22 08:21:32 +02:00
2023-04-28 17:59:37 +02:00
// Save/load session file
2024-04-08 14:43:30 +02:00
LLAMA_API bool llama_state_load_file (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
const char * path_session ,
llama_token * tokens_out ,
size_t n_token_capacity ,
size_t * n_token_count_out ) ;
2024-04-08 14:43:30 +02:00
LLAMA_API DEPRECATED ( bool llama_load_session_file (
struct llama_context * ctx ,
const char * path_session ,
llama_token * tokens_out ,
size_t n_token_capacity ,
size_t * n_token_count_out ) ,
" use llama_state_load_file instead " ) ;
2023-09-28 18:04:36 +02:00
2024-04-08 14:43:30 +02:00
LLAMA_API bool llama_state_save_file (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
const char * path_session ,
const llama_token * tokens ,
size_t n_token_count ) ;
2024-04-08 14:43:30 +02:00
LLAMA_API DEPRECATED ( bool llama_save_session_file (
struct llama_context * ctx ,
const char * path_session ,
const llama_token * tokens ,
size_t n_token_count ) ,
" use llama_state_save_file instead " ) ;
// Get the exact size needed to copy the KV cache of a single sequence
LLAMA_API size_t llama_state_seq_get_size (
struct llama_context * ctx ,
llama_seq_id seq_id ) ;
// Copy the KV cache of a single sequence into the specified buffer
LLAMA_API size_t llama_state_seq_get_data (
struct llama_context * ctx ,
uint8_t * dst ,
2024-07-28 06:42:05 +02:00
size_t size ,
2024-04-08 14:43:30 +02:00
llama_seq_id seq_id ) ;
// Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
// Returns:
// - Positive: Ok
// - Zero: Failed to load
LLAMA_API size_t llama_state_seq_set_data (
struct llama_context * ctx ,
const uint8_t * src ,
2024-07-28 06:42:05 +02:00
size_t size ,
2024-04-08 14:43:30 +02:00
llama_seq_id dest_seq_id ) ;
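    //
    // Example (illustrative sketch): copy the state of seq 0 in ctx_src into seq 1 of ctx_dst
    //
    //     const size_t n = llama_state_seq_get_size(ctx_src, 0);
    //     uint8_t * buf = (uint8_t *) malloc(n);
    //     llama_state_seq_get_data(ctx_src, buf, n, 0);
    //     if (llama_state_seq_set_data(ctx_dst, buf, n, 1) == 0) {
    //         // failed to load the sequence data
    //     }
    //     free(buf);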
LLAMA_API size_t llama_state_seq_save_file (
struct llama_context * ctx ,
const char * filepath ,
llama_seq_id seq_id ,
const llama_token * tokens ,
size_t n_token_count ) ;
LLAMA_API size_t llama_state_seq_load_file (
struct llama_context * ctx ,
const char * filepath ,
llama_seq_id dest_seq_id ,
llama_token * tokens_out ,
size_t n_token_capacity ,
size_t * n_token_count_out ) ;
2023-04-28 17:59:37 +02:00
2023-09-28 18:04:36 +02:00
//
// Decoding
//
2024-10-18 23:18:01 +02:00
// Return batch for single sequence of tokens
// The sequence ID will be fixed to 0
// The position of the tokens will be tracked automatically by llama_decode
2023-09-28 18:04:36 +02:00
//
// NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
//
LLAMA_API struct llama_batch llama_batch_get_one (
llama_token * tokens ,
2024-10-18 23:18:01 +02:00
int32_t n_tokens ) ;
2023-09-28 18:04:36 +02:00
2023-10-18 15:21:57 +02:00
// Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
// Each token can be assigned up to n_seq_max sequence ids
2023-09-28 18:04:36 +02:00
// The batch has to be freed with llama_batch_free()
// If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
// Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
// The rest of the llama_batch members are allocated with size n_tokens
// All members are left uninitialized
LLAMA_API struct llama_batch llama_batch_init (
int32_t n_tokens ,
2023-10-18 15:21:57 +02:00
int32_t embd ,
int32_t n_seq_max ) ;
2023-09-28 18:04:36 +02:00
// Frees a batch of tokens allocated with llama_batch_init()
LLAMA_API void llama_batch_free ( struct llama_batch batch ) ;
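    //
    // Example (illustrative sketch): a token batch with capacity 512 and 1 seq id per token
    //
    //     struct llama_batch batch = llama_batch_init(512, 0, 1);
    //     // fill batch.token[i], batch.pos[i], batch.n_seq_id[i], batch.seq_id[i][0], batch.logits[i]
    //     // and set batch.n_tokens accordingly, then pass the batch to llama_decode()
    //     llama_batch_free(batch);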
2024-07-04 15:46:11 +02:00
// Processes a batch of tokens with the encoder part of the encoder-decoder model.
// Stores the encoder output internally for later use by the decoder cross-attention layers.
// 0 - success
2024-11-13 19:00:35 +01:00
// < 0 - error. the KV cache state is restored to the state before this call
2024-07-04 15:46:11 +02:00
LLAMA_API int32_t llama_encode (
struct llama_context * ctx ,
struct llama_batch batch ) ;
2023-09-28 18:04:36 +02:00
// Positive return values do not mean a fatal error, but rather a warning.
// 0 - success
// 1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
2024-11-13 19:00:35 +01:00
// < 0 - error. the KV cache state is restored to the state before this call
2024-01-02 15:15:16 +01:00
LLAMA_API int32_t llama_decode (
2023-09-28 18:04:36 +02:00
struct llama_context * ctx ,
2023-09-28 21:42:38 +02:00
struct llama_batch batch ) ;
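    //
    // Example (illustrative sketch):
    //
    //     if (llama_model_has_encoder(model)) {
    //         llama_encode(ctx, batch); // run the encoder part once for the prompt
    //     }
    //     const int32_t ret = llama_decode(ctx, batch);
    //     if (ret == 1) { /* no KV slot: shrink the batch or grow the context */ }
    //     if (ret <  0) { /* error: the KV cache state was restored */ }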
// Set the number of threads used for decoding
// n_threads is the number of threads used for generation (single token)
// n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
Threadpool: take 2 (#8672)
* Introduce ggml_compute_threadpool
- OpenMP functional: check
- Vanilla ggml functional: Check
- ggml w/threadpool functional: Check
- OpenMP no regression: No glaring problems
- Vanilla ggml no regression: No glaring problems
- ggml w/threadpool no regression: No glaring problems
* Minor fixes
* fixed use after release bug
* fixed a harmless race condition
* Fix Android build issue
* fix more race conditions
* fix deadlock for cases where cgraph.n_nodes == 1
and fix --poll case
* threadpool: use cpu_get_num_math to set the default number of threadpool threads
This way we avoid using E-Cores and Hyperthreaded siblings.
* bench: create fresh threadpool for each test
For benchmarking it's better to start a fresh pool for each test with the exact number of threads
needed for that test. Having larger pools is suboptimal (causes more load, etc).
* atomics: always use stdatomics with clang and use relaxed memory order when polling in ggml_barrier
This also removes sched_yield() calls from ggml_barrier() to match OpenMP behavior.
* threadpool: make polling the default to match openmp behavior
All command line args now allow for setting poll to 0 (false).
* threadpool: do not wakeup threads in already paused threadpool
* fix potential race condition in check_for_work
* threadpool: do not create two threadpools if their params are identical
* threadpool: reduce pause/resume/wakeup overhead in common cases
We now start threadpool in paused state only if we have two.
The resume is now implicit (ie new work) which allows for reduced locking and context-switch overhead.
* threadpool: add support for hybrid polling
poll params (--poll, ...) now specify "polling level", i.e. how aggressively we poll before waiting on cond.var.
poll=0 means no polling, 1 means poll for 128K rounds then wait, 2 for 256K rounds, ...
The default value of 50 (ie 50x128K rounds) seems like a decent default across modern platforms.
We can tune this further as things evolve.
* threadpool: reduce the number of barrier required
New work is now indicated with an atomic counter that is incremented for
each new graph that needs to be computed.
This removes the need for extra barrier for clearing the "new_work" and
removes the special case for trivial graphs.
* threadpool: remove special-casing for disposable threadpools
With the efficient hybrid polling there is no need to make disposable pools any different.
This simplifies the overall logic and reduces branching.
Include n_threads in debug print for disposable threadpool.
Declare pause and stop flags as atomic_bool
This doesn't actually generate any memory barriers and simply informs
the thread sanitizer that these flags can be written & read by different
threads without locking.
* threadpool: do not clear barrier counters between graph computations (fixes race with small graphs)
This fixes the race condition with very small graphs where the main thread happens to
start a new graph while the workers are just about to exit from barriers.
* threadpool: use relaxed order for chunk sync
Full memory barrier is an overkill for this since each thread works on different chunk
* threadpool: remove abort_callback from threadpool state
* threadpool: better naming for thread/cpumask related functions
* threadpool: consistent use of int type for n_threads params
* threadpool: add support for ggml_threadpool_params_default/init
Also removes the need for explicit mask_specified param.
all-zero cpumask means use default (usually inherited) cpu affinity mask.
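A usage sketch with the functions named in this change (the exact signatures are assumptions here):

    struct ggml_threadpool_params tpp = ggml_threadpool_params_default(8); // 8 threads, all-zero cpumask
    struct ggml_threadpool * threadpool = ggml_threadpool_new(&tpp);
    // ... attach to a context / compute graphs ...
    ggml_threadpool_free(threadpool);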
* threadpool: move typedef into ggml.h
* threadpool: fix apply_priority() function name
* threadpool: fix swift wrapper errors due to n_threads int type cleanup
* threadpool: enable --cpu-mask and other threadpool related options only if threadpool is enabled
* threadpool: replace checks for compute_thread ret code with proper status check
* threadpool: simplify threadpool init logic and fix main thread affinity application
Most of the init code is now exactly the same between threadpool and openmp.
* threadpool: update threadpool resume/pause function names
* threadpool: enable openmp by default for now
* threadpool: don't forget to free workers state when omp is enabled
* threadpool: avoid updating process priority on the platforms that do not require it
On Windows we need to change overall process priority class in order to set thread priorities,
but on Linux, Mac, etc we do not need to touch the overall process settings.
* threadpool: update calling thread prio and affinity only at start/resume
This avoids extra syscalls for each graph_compute()
* llama-bench: turn threadpool params into vectors, add output headers, etc
* llama-bench: add support for cool off between tests --delay
This helps for long running tests on platforms that are thermally limited (phones, laptops, etc).
--delay (disabled by default) introduces the sleep for N seconds before starting each test.
* threadpool: move process priority setting into the apps (bench and cli)
This avoids changing the overall process priority on Windows for the apps
that use ggml/llama.cpp directly.
* threadpool: move all pause/resume logic into ggml
* threadpool: further api cleanup and prep for future refactoring
All threadpool related functions and structs use ggml_threadpool prefix.
* threadpool: minor indent fixes
* threadpool: improve setpriority error message
* Update examples/llama-bench/llama-bench.cpp
Co-authored-by: slaren <slarengh@gmail.com>
* threadpool: fix indent in set_threadpool call
* use int32_t for n_thread type in public llama.cpp API
* threadpool: use _new and _free instead of _create and _release
* fix two more public APIs to use int32_t for n_threads
* build: set _GNU_SOURCE for Android
---------
Co-authored-by: Max Krasnyansky <quic_maxk@quicinc.com>
Co-authored-by: fmz <quic_fzaghlou@quic.com>
Co-authored-by: Max Krasnyansky <max.krasnyansky@gmail.com>
Co-authored-by: slaren <slarengh@gmail.com>
2024-08-30 01:20:53 +02:00
LLAMA_API void llama_set_n_threads ( struct llama_context * ctx , int32_t n_threads , int32_t n_threads_batch ) ;
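    //
    // Example: 8 threads for single-token generation, 16 for prompt/batch processing
    //
    //     llama_set_n_threads(ctx, 8, 16);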
2023-06-04 22:34:30 +02:00
2024-05-23 14:29:26 +02:00
// Get the number of threads used for generation of a single token.
2024-08-30 01:20:53 +02:00
LLAMA_API int32_t llama_n_threads ( struct llama_context * ctx ) ;
2024-05-23 14:29:26 +02:00
// Get the number of threads used for prompt and batch processing (multiple tokens).
2024-08-30 01:20:53 +02:00
LLAMA_API int32_t llama_n_threads_batch ( struct llama_context * ctx ) ;
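// Example (illustrative sketch, not part of the API): configure and query the thread counts.
// Assumes `ctx` is a valid llama_context created elsewhere; the values shown are arbitrary.
//
//    llama_set_n_threads(ctx, /*n_threads=*/8, /*n_threads_batch=*/16);
//
//    const int32_t n_gen   = llama_n_threads      (ctx);
//    const int32_t n_batch = llama_n_threads_batch(ctx);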
2024-05-23 14:29:26 +02:00
2024-06-23 15:39:45 +02:00
// Set whether the model is in embeddings mode or not
2024-06-21 07:38:22 +02:00
// If true, embeddings will be returned but logits will not
LLAMA_API void llama_set_embeddings ( struct llama_context * ctx , bool embeddings ) ;
2024-03-10 16:56:30 +01:00
// Set whether to use causal attention or not
// If set to true, the model will only attend to the past tokens
LLAMA_API void llama_set_causal_attn ( struct llama_context * ctx , bool causal_attn ) ;
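// Example (illustrative sketch): switch a context into embeddings mode.
// Disabling causal attention here reflects a typical encoder-style embedding setup;
// whether that is appropriate depends on the model being used.
//
//    llama_set_embeddings (ctx, true);
//    llama_set_causal_attn(ctx, false);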
2024-03-02 20:52:25 +01:00
// Set abort callback
LLAMA_API void llama_set_abort_callback ( struct llama_context * ctx , ggml_abort_callback abort_callback , void * abort_callback_data ) ;
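// Example (illustrative sketch): a cooperative abort flag.
// Assumes ggml_abort_callback has the form `bool (*)(void * data)` and that returning
// true requests the current computation to stop.
//
//    static bool my_should_abort(void * data) {
//        const bool * flag = (const bool *) data;
//        return *flag; // return true to stop the ongoing llama_decode()
//    }
//
//    static bool g_abort = false;
//    llama_set_abort_callback(ctx, my_should_abort, (void *) &g_abort);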
2024-03-13 18:54:21 +01:00
// Wait until all computations are finished
// This is automatically done when using one of the functions below to obtain the computation results,
// so it is not necessary to call this explicitly in most cases
LLAMA_API void llama_synchronize ( struct llama_context * ctx ) ;
2024-03-02 20:52:25 +01:00
// Token logits obtained from the last call to llama_decode()
2024-03-26 15:46:41 +01:00
// The logits for which llama_batch.logits[i] != 0 are stored contiguously
// in the order they have appeared in the batch.
// Rows: number of tokens for which llama_batch.logits[i] != 0
2023-08-21 22:07:43 +02:00
// Cols: n_vocab
LLAMA_API float * llama_get_logits ( struct llama_context * ctx ) ;
2024-04-08 15:02:30 +02:00
// Logits for the ith token. For positive indices, equivalent to:
2024-03-26 15:46:41 +01:00
// llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
2024-04-08 15:02:30 +02:00
// Negative indices can be used to access logits in reverse order, -1 is the last logit.
2024-03-26 15:46:41 +01:00
// returns NULL for invalid ids.
2023-09-28 18:04:36 +02:00
LLAMA_API float * llama_get_logits_ith ( struct llama_context * ctx , int32_t i ) ;
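// Example (illustrative sketch): read the logits of the last output in the batch and take
// the argmax by hand. Assumes the corresponding llama_batch entry had logits[i] != 0, and
// uses llama_n_vocab (declared elsewhere in this header) to obtain the vocabulary size.
//
//    const int32_t n_vocab = llama_n_vocab(model);
//    const float * logits  = llama_get_logits_ith(ctx, -1); // -1 == last output
//    if (logits != NULL) {
//        llama_token best = 0;
//        for (llama_token id = 1; id < n_vocab; ++id) {
//            if (logits[id] > logits[best]) {
//                best = id;
//            }
//        }
//    }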
2024-03-26 15:46:41 +01:00
// Get all output token embeddings.
// when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
// the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
// in the order they have appeared in the batch.
// shape: [n_outputs*n_embd]
// Otherwise, returns NULL.
2023-08-21 22:07:43 +02:00
LLAMA_API float * llama_get_embeddings ( struct llama_context * ctx ) ;
2024-04-08 15:02:30 +02:00
// Get the embeddings for the ith token. For positive indices, equivalent to:
2024-03-26 15:46:41 +01:00
// llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
2024-04-08 15:02:30 +02:00
// Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
2024-03-04 21:31:20 +01:00
// shape: [n_embd] (1-dimensional)
2024-03-26 15:46:41 +01:00
// returns NULL for invalid ids.
2024-02-13 13:06:58 +01:00
LLAMA_API float * llama_get_embeddings_ith ( struct llama_context * ctx , int32_t i ) ;
2024-03-04 21:31:20 +01:00
// Get the embeddings for a sequence id
// Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
2024-09-28 16:42:03 +02:00
// when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence
// otherwise: float[n_embd] (1-dimensional)
2024-03-04 21:31:20 +01:00
LLAMA_API float * llama_get_embeddings_seq ( struct llama_context * ctx , llama_seq_id seq_id ) ;
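// Example (illustrative sketch): fetch the pooled embedding of sequence id 0.
// Assumes the context uses a pooling type other than LLAMA_POOLING_TYPE_NONE, and uses
// llama_n_embd (declared elsewhere in this header) to obtain the embedding size.
//
//    const int32_t n_embd = llama_n_embd(model);
//    const float * embd   = llama_get_embeddings_seq(ctx, 0);
//    if (embd != NULL) {
//        // embd points to n_embd floats for sequence id 0
//    }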
2023-08-21 22:07:43 +02:00
//
// Vocab
//
2023-10-23 21:40:03 +02:00
LLAMA_API const char * llama_token_get_text ( const struct llama_model * model , llama_token token ) ;
2023-08-21 22:07:43 +02:00
2023-10-23 21:40:03 +02:00
LLAMA_API float llama_token_get_score ( const struct llama_model * model , llama_token token ) ;
2023-08-21 22:07:43 +02:00
2024-06-04 09:17:17 +02:00
LLAMA_API enum llama_token_attr llama_token_get_attr ( const struct llama_model * model , llama_token token ) ;
2023-08-21 22:07:43 +02:00
2024-04-21 13:50:41 +02:00
// Check if the token is supposed to end generation (end-of-generation, e.g. EOS, EOT, etc.)
LLAMA_API bool llama_token_is_eog ( const struct llama_model * model , llama_token token ) ;
2024-05-25 11:04:03 +02:00
// Identify if the token id is a control token or a renderable token
LLAMA_API bool llama_token_is_control ( const struct llama_model * model , llama_token token ) ;
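// Example (illustrative sketch): stop a generation loop on an end-of-generation token
// and avoid rendering control tokens as text. `id` is a sampled token from this loop.
//
//    if (llama_token_is_eog(model, id)) {
//        // end of generation (EOS, EOT, ...) - stop the loop
//    } else if (!llama_token_is_control(model, id)) {
//        // safe to render this token as text
//    }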
2024-04-21 13:50:41 +02:00
2023-08-21 22:07:43 +02:00
// Special tokens
2023-10-23 21:40:03 +02:00
LLAMA_API llama_token llama_token_bos ( const struct llama_model * model ) ; // beginning-of-sentence
LLAMA_API llama_token llama_token_eos ( const struct llama_model * model ) ; // end-of-sentence
2024-10-12 07:21:51 +02:00
LLAMA_API llama_token llama_token_eot ( const struct llama_model * model ) ; // end-of-turn
2024-04-09 19:44:08 +02:00
LLAMA_API llama_token llama_token_cls ( const struct llama_model * model ) ; // classification
LLAMA_API llama_token llama_token_sep ( const struct llama_model * model ) ; // sentence separator
2023-10-23 21:40:03 +02:00
LLAMA_API llama_token llama_token_nl ( const struct llama_model * model ) ; // next-line
2024-06-25 21:14:35 +02:00
LLAMA_API llama_token llama_token_pad ( const struct llama_model * model ) ; // padding
2023-10-23 21:40:03 +02:00
2024-08-15 09:23:23 +02:00
LLAMA_API bool llama_add_bos_token ( const struct llama_model * model ) ;
LLAMA_API bool llama_add_eos_token ( const struct llama_model * model ) ;
2023-11-17 03:14:37 +01:00
2024-10-12 07:21:51 +02:00
// infill tokens
DEPRECATED ( LLAMA_API llama_token llama_token_prefix ( const struct llama_model * model ) , " use llama_token_fim_pre instead " ) ;
DEPRECATED ( LLAMA_API llama_token llama_token_middle ( const struct llama_model * model ) , " use llama_token_fim_mid instead " ) ;
DEPRECATED ( LLAMA_API llama_token llama_token_suffix ( const struct llama_model * model ) , " use llama_token_fim_suf instead " ) ;
LLAMA_API llama_token llama_token_fim_pre ( const struct llama_model * model ) ;
LLAMA_API llama_token llama_token_fim_suf ( const struct llama_model * model ) ;
LLAMA_API llama_token llama_token_fim_mid ( const struct llama_model * model ) ;
LLAMA_API llama_token llama_token_fim_pad ( const struct llama_model * model ) ;
LLAMA_API llama_token llama_token_fim_rep ( const struct llama_model * model ) ;
LLAMA_API llama_token llama_token_fim_sep ( const struct llama_model * model ) ;
2023-08-21 22:07:43 +02:00
//
// Tokenization
//
2024-09-28 14:13:21 +02:00
// The API is thread-safe.
//
2023-08-21 22:07:43 +02:00
2023-10-17 17:11:01 +02:00
/// @details Convert the provided text into tokens.
/// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
2024-03-11 16:49:47 +01:00
/// @return Returns the number of tokens on success, no more than n_tokens_max
2023-10-17 17:11:01 +02:00
/// @return Returns a negative number on failure - the number of tokens that would have been returned
2024-07-05 19:01:35 +02:00
/// @param add_special Allow adding BOS and EOS tokens if the model is configured to do so.
2024-04-09 19:44:08 +02:00
/// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
/// as plaintext. Does not insert a leading space.
2024-01-02 15:15:16 +01:00
LLAMA_API int32_t llama_tokenize (
2023-07-14 20:55:24 +02:00
const struct llama_model * model ,
const char * text ,
2024-01-02 15:15:16 +01:00
int32_t text_len ,
2023-07-14 20:55:24 +02:00
llama_token * tokens ,
2024-03-11 16:49:47 +01:00
int32_t n_tokens_max ,
2024-04-09 19:44:08 +02:00
bool add_special ,
bool parse_special ) ;
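// Example (illustrative sketch): the usual two-pass tokenization pattern.
// A first call with a zero-sized buffer is assumed to be accepted as a size query and to
// return the negated number of tokens required; the flag values are arbitrary choices.
//
//    int32_t n = llama_tokenize(model, text, (int32_t) strlen(text), NULL, 0,
//                               /*add_special=*/true, /*parse_special=*/false);
//    if (n < 0) {
//        n = -n;
//    }
//    llama_token * tokens = (llama_token *) malloc(n * sizeof(llama_token));
//    n = llama_tokenize(model, text, (int32_t) strlen(text), tokens, n,
//                       /*add_special=*/true, /*parse_special=*/false);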
2023-07-14 20:55:24 +02:00
2023-08-27 13:19:19 +02:00
// Token Id -> Piece.
// Uses the vocabulary in the provided context.
// Does not write null terminator to the buffer.
2024-07-05 19:01:35 +02:00
// User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
2024-04-21 17:36:45 +02:00
// @param special If true, special tokens are rendered in the output.
2024-01-02 15:15:16 +01:00
LLAMA_API int32_t llama_token_to_piece (
2023-07-14 20:55:24 +02:00
const struct llama_model * model ,
2023-08-21 22:07:43 +02:00
llama_token token ,
char * buf ,
2024-04-21 17:36:45 +02:00
int32_t length ,
2024-07-05 19:01:35 +02:00
int32_t lstrip ,
2024-04-21 17:36:45 +02:00
bool special ) ;
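// Example (illustrative sketch): convert a single token id to its text piece.
// The 128-byte buffer is an arbitrary choice; a negative return value is assumed to be
// the negated number of bytes that would have been needed.
//
//    char buf[128];
//    const int32_t n = llama_token_to_piece(model, id, buf, (int32_t) sizeof(buf),
//                                           /*lstrip=*/0, /*special=*/false);
//    if (n >= 0) {
//        // buf holds n bytes of UTF-8 text (no null terminator is written)
//    }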
2023-04-29 07:34:41 +02:00
2024-07-05 19:01:35 +02:00
/// @details Convert the provided tokens into text (inverse of llama_tokenize()).
/// @param text The char pointer must be large enough to hold the resulting text.
/// @return Returns the number of chars/bytes on success, no more than text_len_max.
/// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
/// @param remove_special Allow removing BOS and EOS tokens if the model is configured to do so.
/// @param unparse_special If true, special tokens are rendered in the output.
LLAMA_API int32_t llama_detokenize (
const struct llama_model * model ,
const llama_token * tokens ,
int32_t n_tokens ,
char * text ,
int32_t text_len_max ,
bool remove_special ,
bool unparse_special ) ;
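// Example (illustrative sketch): turn a token sequence back into text, mirroring the
// tokenization pattern above. `tokens`/`n_tokens` are assumed to come from a previous
// llama_tokenize() call; the buffer size is an arbitrary choice for this sketch.
//
//    char text_out[256];
//    const int32_t n_chars = llama_detokenize(model, tokens, n_tokens,
//                                             text_out, (int32_t) sizeof(text_out),
//                                             /*remove_special=*/false, /*unparse_special=*/false);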
2024-07-23 12:10:17 +02:00
//
// Chat templates
//
2024-02-19 09:23:37 +01:00
/// Apply chat template. Inspired by hf apply_chat_template() on python.
/// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
2024-02-22 00:31:00 +01:00
/// NOTE: This function does not use a jinja parser. It only supports a pre-defined list of templates. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
2024-02-19 09:23:37 +01:00
/// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model's default chat template will be used instead.
/// @param chat Pointer to a list of multiple llama_chat_message
/// @param n_msg Number of llama_chat_message in this chat
/// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
/// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
/// @param length The size of the allocated buffer
/// @return The total number of bytes of the formatted prompt. If it is larger than the size of the buffer, you may need to re-alloc it and then re-apply the template.
LLAMA_API int32_t llama_chat_apply_template (
const struct llama_model * model ,
const char * tmpl ,
const struct llama_chat_message * chat ,
size_t n_msg ,
bool add_ass ,
char * buf ,
int32_t length ) ;
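// Example (illustrative sketch): format a short chat with the model's default template.
// llama_chat_message (declared earlier in this header) is assumed to carry `role` and
// `content` fields; the grow-and-retry step handles a formatted prompt that exceeds the buffer.
//
//    struct llama_chat_message chat[] = {
//        { "system", "You are a helpful assistant." },
//        { "user",   "Hello!"                       },
//    };
//
//    int32_t size = 1024; // initial guess
//    char *  buf  = (char *) malloc(size);
//    int32_t res  = llama_chat_apply_template(model, NULL, chat, 2, /*add_ass=*/true, buf, size);
//    if (res > size) {
//        buf = (char *) realloc(buf, res);
//        res = llama_chat_apply_template(model, NULL, chat, 2, /*add_ass=*/true, buf, res);
//    }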
2024-12-02 22:10:19 +01:00
// Get list of built-in chat templates
2024-12-03 12:54:30 +01:00
LLAMA_API int32_t llama_chat_builtin_templates ( const char * * output , size_t len ) ;
2024-12-02 22:10:19 +01:00
2023-08-21 22:07:43 +02:00
//
2024-09-07 14:16:19 +02:00
// Sampling API
//
// Sample usage:
//
// // prepare the sampling chain at the start
// auto sparams = llama_sampler_chain_default_params();
//
// llama_sampler * smpl = llama_sampler_chain_init(sparams);
//
// llama_sampler_chain_add(smpl, llama_sampler_init_top_k(50));
// llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1));
// llama_sampler_chain_add(smpl, llama_sampler_init_temp (0.8));
//
// // typically, the chain should end with a sampler such as "greedy", "dist" or "mirostat"
// // this sampler will be responsible to select the actual token
// llama_sampler_chain_add(smpl, llama_sampler_init_dist(seed));
//
// ...
//
// // decoding loop:
// while (...) {
// ...
//
// llama_decode(ctx, batch);
//
// // sample from the logits of the last token in the batch
// const llama_token id = llama_sampler_sample(smpl, ctx, -1);
//
// // accepting the token updates the internal state of certain samplers (e.g. grammar, repetition, etc.)
// llama_sampler_accept(smpl, id);
// ...
// }
//
// llama_sampler_free(smpl);
//
// TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU).
// TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab
2023-07-24 05:58:10 +02:00
//
2023-08-21 22:07:43 +02:00
2024-09-07 14:16:19 +02:00
typedef void * llama_sampler_context_t ;
2024-07-23 12:10:17 +02:00
2024-09-07 14:16:19 +02:00
// user code can implement the interface below in order to create custom llama_sampler
struct llama_sampler_i {
const char * ( * name ) ( const struct llama_sampler * smpl ) ; // can be NULL
void ( * accept ) ( struct llama_sampler * smpl , llama_token token ) ; // can be NULL
void ( * apply ) ( struct llama_sampler * smpl , llama_token_data_array * cur_p ) ; // required
void ( * reset ) ( struct llama_sampler * smpl ) ; // can be NULL
struct llama_sampler * ( * clone ) ( const struct llama_sampler * smpl ) ; // can be NULL if ctx is NULL
void ( * free ) ( struct llama_sampler * smpl ) ; // can be NULL if ctx is NULL
2024-07-23 12:10:17 +02:00
2024-09-07 14:16:19 +02:00
// TODO: API for internal libllama usage for appending the sampling to an existing ggml_cgraph
//void (*apply_ggml) (struct llama_sampler * smpl, ...);
} ;
2023-04-29 07:34:41 +02:00
2024-09-07 14:16:19 +02:00
struct llama_sampler {
struct llama_sampler_i * iface ;
llama_sampler_context_t ctx ;
} ;
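// Example (illustrative sketch): a minimal custom sampler that always selects the
// highest-logit candidate. It assumes llama_token_data_array (declared earlier in this
// header) exposes `data`, `size` and `selected` members; only `apply` is implemented,
// the remaining callbacks are left NULL as permitted above.
//
//    static const char * my_argmax_name(const struct llama_sampler * smpl) {
//        return "my-argmax";
//    }
//
//    static void my_argmax_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
//        size_t best = 0;
//        for (size_t i = 1; i < cur_p->size; ++i) {
//            if (cur_p->data[i].logit > cur_p->data[best].logit) {
//                best = i;
//            }
//        }
//        cur_p->selected = (int64_t) best;
//    }
//
//    static struct llama_sampler_i my_argmax_i = {
//        /*.name   =*/ my_argmax_name,
//        /*.accept =*/ NULL,
//        /*.apply  =*/ my_argmax_apply,
//        /*.reset  =*/ NULL,
//        /*.clone  =*/ NULL,
//        /*.free   =*/ NULL,
//    };
//
//    static struct llama_sampler my_argmax = { &my_argmax_i, /*ctx=*/ NULL };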
2023-09-28 18:04:36 +02:00
2024-09-07 14:16:19 +02:00
// mirror of llama_sampler_i:
LLAMA_API const char * llama_sampler_name ( const struct llama_sampler * smpl ) ;
LLAMA_API void llama_sampler_accept ( struct llama_sampler * smpl , llama_token token ) ;
LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl , llama_token_data_array * cur_p ) ;
LLAMA_API void llama_sampler_reset ( struct llama_sampler * smpl ) ;
LLAMA_API struct llama_sampler * llama_sampler_clone ( const struct llama_sampler * smpl ) ;
// important: do not free if the sampler has been added to a llama_sampler_chain (via llama_sampler_chain_add)
LLAMA_API void llama_sampler_free ( struct llama_sampler * smpl ) ;
// llama_sampler_chain
// a type of llama_sampler that can chain multiple samplers one after another
LLAMA_API struct llama_sampler * llama_sampler_chain_init ( struct llama_sampler_chain_params params ) ;
// important: takes ownership of the sampler object and will free it when llama_sampler_free is called
LLAMA_API void llama_sampler_chain_add ( struct llama_sampler * chain , struct llama_sampler * smpl ) ;
LLAMA_API struct llama_sampler * llama_sampler_chain_get ( const struct llama_sampler * chain , int32_t i ) ;
LLAMA_API int llama_sampler_chain_n ( const struct llama_sampler * chain ) ;
2024-09-13 03:54:49 +02:00
// after removing a sampler, the chain will no longer own it, and it will not be freed when the chain is freed
LLAMA_API struct llama_sampler * llama_sampler_chain_remove ( struct llama_sampler * chain , int32_t i ) ;
2024-09-07 14:16:19 +02:00
// available samplers:
2024-10-21 08:46:40 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_greedy ( void ) ;
LLAMA_API struct llama_sampler * llama_sampler_init_dist ( uint32_t seed ) ;
2024-01-15 14:06:52 +01:00
2023-04-29 07:34:41 +02:00
/// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
2024-09-24 08:03:17 +02:00
/// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first.
2024-10-21 08:46:40 +02:00
DEPRECATED ( LLAMA_API struct llama_sampler * llama_sampler_init_softmax ( void ) ,
" will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915) " ) ;
2023-04-29 07:34:41 +02:00
/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_top_k ( int32_t k ) ;
2023-04-29 07:34:41 +02:00
/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_top_p ( float p , size_t min_keep ) ;
2023-04-29 07:34:41 +02:00
2023-10-31 20:44:49 +01:00
/// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_min_p ( float p , size_t min_keep ) ;
2023-10-31 20:44:49 +01:00
2023-04-29 07:34:41 +02:00
/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_typical ( float p , size_t min_keep ) ;
2024-10-21 08:46:40 +02:00
/// @details Updates the logits l_i' = l_i/t. When t <= 0.0f, the maximum logit is kept at its original value, the rest are set to -inf
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_temp ( float t ) ;
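// Illustrative worked example (the numbers are assumptions): with logits {2.0, 1.0, 0.0} and
// t = 0.5 the updated logits are {4.0, 2.0, 0.0}; with t <= 0.0f they become
// {2.0, -inf, -inf}, i.e. greedy selection of the maximum logit.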
2023-09-28 18:04:36 +02:00
2024-09-07 14:16:19 +02:00
/// @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.
LLAMA_API struct llama_sampler * llama_sampler_init_temp_ext ( float t , float delta , float exponent ) ;
2023-09-28 18:04:36 +02:00
2024-10-15 12:54:55 +02:00
/// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335
LLAMA_API struct llama_sampler * llama_sampler_init_xtc ( float p , float t , size_t min_keep , uint32_t seed ) ;
2023-04-29 07:34:41 +02:00
/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_mirostat (
int32_t n_vocab ,
uint32_t seed ,
float tau ,
float eta ,
int32_t m ) ;
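// Example (illustrative sketch, assuming `chain` and `model` as in the chain example above;
// tau = 5.0, eta = 0.1, m = 100 are assumed starting values, not prescribed defaults):
//
//    llama_sampler_chain_add(chain,
//        llama_sampler_init_mirostat(llama_n_vocab(model), LLAMA_DEFAULT_SEED, 5.0f, 0.1f, 100));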
2023-04-29 07:34:41 +02:00
/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2 (
uint32_t seed ,
float tau ,
float eta ) ;
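// Example (illustrative sketch; Mirostat 2.0 needs only the seed, tau and eta,
// with the same assumed values as above):
//
//    llama_sampler_chain_add(chain, llama_sampler_init_mirostat_v2(LLAMA_DEFAULT_SEED, 5.0f, 0.1f));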
LLAMA_API struct llama_sampler * llama_sampler_init_grammar (
const struct llama_model * model ,
const char * grammar_str ,
const char * grammar_root ) ;
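// Example (illustrative sketch; the GBNF grammar string is an assumption made up for
// illustration, constraining output to a literal "yes" or "no"):
//
//    const char * grammar = "root ::= \"yes\" | \"no\"";
//    llama_sampler_chain_add(chain, llama_sampler_init_grammar(model, grammar, "root"));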
LLAMA_API struct llama_sampler * llama_sampler_init_penalties (
int32_t n_vocab , // llama_n_vocab()
llama_token special_eos_id , // llama_token_eos()
llama_token linefeed_id , // llama_token_nl()
int32_t penalty_last_n , // last n tokens to penalize (0 = disable penalty, -1 = context size)
float penalty_repeat , // 1.0 = disabled
float penalty_freq , // 0.0 = disabled
float penalty_present , // 0.0 = disabled
bool penalize_nl , // consider newlines as a repeatable token
bool ignore_eos ) ; // ignore the end-of-sequence token
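// Example (illustrative sketch; the numeric values are assumptions for a mild
// repetition penalty over the last 64 tokens):
//
//    llama_sampler_chain_add(chain, llama_sampler_init_penalties(
//        llama_n_vocab(model),
//        llama_token_eos(model),
//        llama_token_nl(model),
//        64,       // penalty_last_n
//        1.1f,     // penalty_repeat
//        0.0f,     // penalty_freq    (disabled)
//        0.0f,     // penalty_present (disabled)
//        false,    // penalize_nl
//        false));  // ignore_eos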
2024-10-25 18:07:34 +02:00
/// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982
LLAMA_API struct llama_sampler * llama_sampler_init_dry (
const struct llama_model * model ,
float dry_multiplier ,
float dry_base ,
int32_t dry_allowed_length ,
int32_t dry_penalty_last_n ,
const char * * seq_breakers ,
size_t num_breakers ) ;
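// Example (illustrative sketch; the multiplier/base/length values and the sequence
// breakers are assumptions, not mandated defaults):
//
//    const char * seq_breakers[] = { "\n", ":", "\"", "*" };
//    llama_sampler_chain_add(chain,
//        llama_sampler_init_dry(model, 0.8f, 1.75f, 2, -1, seq_breakers, 4));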
2024-09-07 14:16:19 +02:00
LLAMA_API struct llama_sampler * llama_sampler_init_logit_bias (
int32_t n_vocab ,
int32_t n_logit_bias ,
const llama_logit_bias * logit_bias ) ;
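// Example (illustrative sketch; the token ids are placeholders, look up real ids in the
// model vocab; -INFINITY comes from <math.h> and effectively bans a token):
//
//    llama_logit_bias biases[] = {
//        { 123,  1.5f     }, // make token 123 more likely
//        { 456, -INFINITY }, // never sample token 456
//    };
//    llama_sampler_chain_add(chain,
//        llama_sampler_init_logit_bias(llama_n_vocab(model), 2, biases));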
2024-10-15 15:35:33 +02:00
// this sampler is meant to be used for fill-in-the-middle infilling
// it's supposed to be used after top_k + top_p sampling
//
// 1. if the sum of the EOG probs times the number of candidates is higher than the sum of the other probs -> pick EOG
// 2. combine probs of tokens that have the same prefix
//
// example:
//
// - before:
// "hel": 0.5
// "hell": 0.2
// "hello": 0.1
// "dummy": 0.1
//
// - after:
// "hel": 0.8
// "dummy": 0.1
//
// 3. discard non-EOG tokens with low prob
// 4. if no tokens are left -> pick EOT
//
LLAMA_API struct llama_sampler * llama_sampler_init_infill ( const struct llama_model * model ) ;
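// Example (illustrative sketch of an infill chain following the note above; the
// top-k/top-p values are assumptions):
//
//    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
//    llama_sampler_chain_add(chain, llama_sampler_init_top_p(0.90f, 1));
//    llama_sampler_chain_add(chain, llama_sampler_init_infill(model));
//    llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));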
2024-09-10 18:04:25 +02:00
// Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise
LLAMA_API uint32_t llama_sampler_get_seed ( const struct llama_sampler * smpl ) ;
2024-09-09 17:10:46 +02:00
/// @details Sample and accept a token from the idx-th output of the last evaluation
2024-09-07 14:16:19 +02:00
//
2024-09-09 17:10:46 +02:00
// Shorthand for:
2024-09-07 14:16:19 +02:00
// const auto * logits = llama_get_logits_ith(ctx, idx);
// llama_token_data_array cur_p = { ... init from logits ... };
// llama_sampler_apply(smpl, &cur_p);
2024-09-09 17:10:46 +02:00
// auto token = cur_p.data[cur_p.selected].id;
// llama_sampler_accept(smpl, token);
// return token;
// Returns the sampled token
2024-09-07 14:16:19 +02:00
LLAMA_API llama_token llama_sampler_sample ( struct llama_sampler * smpl , struct llama_context * ctx , int32_t idx ) ;
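// Example (illustrative sketch of a generation loop; assumes the prompt was already
// decoded with llama_decode and that `chain`, `ctx`, `model`, `n_predict` exist as above):
//
//    for (int i = 0; i < n_predict; ++i) {
//        const llama_token tok = llama_sampler_sample(chain, ctx, -1); // -1 = last output
//        if (llama_token_is_eog(model, tok)) {
//            break;
//        }
//        // ... decode `tok` with llama_decode() and emit its text ...
//    }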
2023-03-22 06:32:36 +01:00
2024-09-07 14:16:19 +02:00
// TODO: extend in the future
//LLAMA_API void llama_decode_with_sampler(struct llama_context * ctx, struct llama_sampler * smpl, struct llama_batch batch, ...);
2023-03-22 06:32:36 +01:00
2023-08-25 17:18:48 +02:00
//
2024-06-04 20:23:05 +02:00
// Model split
2023-08-25 17:18:48 +02:00
//
2024-03-22 19:00:01 +01:00
/// @details Build a split GGUF final path for this chunk.
/// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
// Returns the split_path length.
LLAMA_API int llama_split_path ( char * split_path , size_t maxlen , const char * path_prefix , int split_no , int split_count ) ;
/// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
/// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
// Returns the split_prefix length.
LLAMA_API int llama_split_prefix ( char * split_prefix , size_t maxlen , const char * split_path , int split_no , int split_count ) ;
2023-03-22 06:32:36 +01:00
// Print system information
LLAMA_API const char * llama_print_system_info ( void ) ;
2023-08-21 22:07:43 +02:00
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
2023-09-27 17:48:33 +02:00
LLAMA_API void llama_log_set ( ggml_log_callback log_callback , void * user_data ) ;
2023-08-21 22:07:43 +02:00
2024-09-07 14:16:19 +02:00
//
// Performance utils
//
// NOTE: Used by llama.cpp examples, avoid using in third-party apps. Instead, do your own performance measurements.
//
2024-07-23 12:10:17 +02:00
2024-09-13 08:53:38 +02:00
struct llama_perf_context_data {
double t_start_ms ;
double t_load_ms ;
double t_p_eval_ms ;
double t_eval_ms ;
int32_t n_p_eval ;
int32_t n_eval ;
2024-09-07 14:16:19 +02:00
} ;
2024-04-04 09:44:28 +02:00
2024-09-13 08:53:38 +02:00
struct llama_perf_sampler_data {
double t_sample_ms ;
int32_t n_sample ;
} ;
LLAMA_API struct llama_perf_context_data llama_perf_context ( const struct llama_context * ctx ) ;
LLAMA_API void llama_perf_context_print ( const struct llama_context * ctx ) ;
LLAMA_API void llama_perf_context_reset ( struct llama_context * ctx ) ;
// NOTE: the following work only with samplers constructed via llama_sampler_chain_init
LLAMA_API struct llama_perf_sampler_data llama_perf_sampler ( const struct llama_sampler * chain ) ;
LLAMA_API void llama_perf_sampler_print ( const struct llama_sampler * chain ) ;
LLAMA_API void llama_perf_sampler_reset ( struct llama_sampler * chain ) ;
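// Example (illustrative sketch; `ctx` is a llama_context and `chain` a sampler chain):
//
//    llama_perf_context_print(ctx);
//    llama_perf_sampler_print(chain); // only meaningful for llama_sampler_chain samplers
//
//    llama_perf_context_reset(ctx);
//    llama_perf_sampler_reset(chain);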
2024-04-04 09:44:28 +02:00
2024-09-07 14:16:19 +02:00
# ifdef __cplusplus
}
# endif
2023-04-13 17:04:45 +02:00
2023-04-08 21:24:37 +02:00
# endif // LLAMA_H