llama : expose model's rope_freq_scale in the API (#3418)
so it can be scaled further before creating a context.
commit 48be797ffb
parent f56e1baec3
llama.cpp
@@ -7038,6 +7038,10 @@ int llama_n_embd(const struct llama_model * model) {
     return model->hparams.n_embd;
 }
 
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+    return model->hparams.rope_freq_scale_train;
+}
+
 int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
     return snprintf(buf, buf_size, "%s %s %s",
             llama_model_arch_name(model->arch).c_str(),
llama.h (+3)
@@ -282,6 +282,9 @@ extern "C" {
     LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
     LLAMA_API int llama_n_embd     (const struct llama_model * model);
 
+    // Get the model's RoPE frequency scaling factor
+    LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
+
     // Get a string describing the model type
     LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
 
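The sketch below is not part of the commit; it only illustrates how the new getter might be used: read the RoPE frequency scale the model was trained with, apply an extra factor, and pass the result through llama_context_params before the context is created. It assumes the llama_model_params / llama_context_params API of this era (llama_model_default_params, llama_context_default_params, llama_load_model_from_file, llama_new_context_with_model) and the rope_freq_scale field of llama_context_params; names and signatures may differ in other versions.

// usage_sketch.cpp -- illustrative only, not part of this commit
#include "llama.h"
#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init(false); // bool argument = NUMA, per the API of this era

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == NULL) {
        return 1;
    }

    // new in this commit: the RoPE frequency scale the model was trained with
    const float freq_scale_train = llama_rope_freq_scale_train(model);
    printf("rope_freq_scale_train = %g\n", freq_scale_train);

    // scale it further before creating the context
    llama_context_params cparams = llama_context_default_params();
    cparams.rope_freq_scale = freq_scale_train * 0.5f; // example extra factor

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        llama_free_model(model);
        return 1;
    }

    // ... evaluate tokens with ctx ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

The extra factor (0.5f here) is purely an example; the point of the commit is that callers no longer have to re-read the GGUF metadata themselves to know the training-time scale before adjusting it.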