Mirror of https://github.com/ggerganov/llama.cpp.git
Change default repeat_penalty to 1.0
I feel this penalty is not really helping. Especially for the example from the README, it makes the results pretty bad.
Parent: eb34620aec
Commit: 8f644a0a85
Changed file: utils.h (31 changed lines)
--- a/utils.h
+++ b/utils.h
@@ -13,33 +13,32 @@
 //
 
 struct gpt_params {
     int32_t seed = -1; // RNG seed
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
     int32_t n_predict = 128; // new tokens to predict
     int32_t repeat_last_n = 64; // last n tokens to penalize
     int32_t n_ctx = 512; //context size
-    bool memory_f16 = false; // use f16 instead of f32 for memory kv
 
     // sampling parameters
     int32_t top_k = 40;
     float top_p = 0.95f;
     float temp = 0.80f;
-    float repeat_penalty = 1.30f;
+    float repeat_penalty = 1.10f;
 
     int32_t n_batch = 8; // batch size for prompt processing
 
     std::string model = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
 
-    bool random_prompt = false;
-
-    bool use_color = false; // use color to distinguish generations and inputs
-
-    bool interactive = false; // interactive mode
-    bool interactive_start = false; // reverse prompt immediately
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
-    bool instruct = false; // instruction mode (used for Alpaca models)
-    bool ignore_eos = false; // do not stop generating after eos
+
+    bool memory_f16 = false; // use f16 instead of f32 for memory kv
+    bool random_prompt = false; // do not randomize prompt if none provided
+    bool use_color = false; // use color to distinguish generations and inputs
+    bool interactive = false; // interactive mode
+    bool interactive_start = false; // reverse prompt immediately
+    bool instruct = false; // instruction mode (used for Alpaca models)
+    bool ignore_eos = false; // do not stop generating after eos
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
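For context, here is a minimal sketch of how a CTRL-style repeat penalty of this kind is typically applied to the logits before sampling. This is illustrative, not the repository's exact code: the helper name apply_repeat_penalty and its signature are assumptions, with the last repeat_last_n token ids assumed to be collected in last_n_tokens as suggested by gpt_params above.

// Minimal illustrative sketch of a CTRL-style repeat penalty; not the
// repository's exact implementation. apply_repeat_penalty is a hypothetical
// helper name.
#include <cstdint>
#include <unordered_set>
#include <vector>

void apply_repeat_penalty(std::vector<float> & logits,
                          const std::vector<int32_t> & last_n_tokens,
                          float repeat_penalty) {
    // Penalize each distinct recently seen token once.
    const std::unordered_set<int32_t> recent(last_n_tokens.begin(), last_n_tokens.end());
    for (const int32_t token : recent) {
        // Push the logit toward "less likely": divide positive logits by the
        // penalty and multiply negative ones, so the token's probability drops.
        if (logits[token] >= 0.0f) {
            logits[token] /= repeat_penalty;
        } else {
            logits[token] *= repeat_penalty;
        }
    }
}

With repeat_penalty = 1.0f both branches are no-ops, which is why 1.0 disables the penalty entirely; the 1.10f default in this diff keeps a mild discouragement of recent tokens compared to the previous 1.30f.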