minor : spacing

Georgi Gerganov 2024-03-22 15:24:57 +02:00 committed by GitHub
parent 2605c139a6
commit 12aa74ba7d


@@ -225,6 +225,7 @@ struct my_llama_hparams {
     uint32_t n_head_kv = 32;
     uint32_t n_layer = 32;
     uint32_t n_rot = 64;
+
     bool operator!=(const my_llama_hparams& other) const {
         return memcmp(this, &other, sizeof(my_llama_hparams));
     }
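
Note (unchanged by this commit): the operator!= compares the structs byte-for-byte, which is sound here because my_llama_hparams holds only uint32_t fields, so there are no padding bytes with indeterminate values. A minimal standalone sketch of the same pattern (hypothetical test harness, not part of the commit):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Trimmed-down stand-in for my_llama_hparams, for illustration only.
    struct hparams_demo {
        uint32_t n_head_kv = 32;
        uint32_t n_layer   = 32;
        uint32_t n_rot     = 64;

        bool operator!=(const hparams_demo & other) const {
            // Byte-wise comparison is safe: all members are uint32_t,
            // so the struct has no padding.
            return memcmp(this, &other, sizeof(hparams_demo));
        }
    };

    int main() {
        hparams_demo a, b;
        b.n_rot = 128;
        printf("differ: %s\n", (a != b) ? "yes" : "no"); // prints "differ: yes"
    }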
@@ -643,6 +644,7 @@ static void save_as_llama_model(
     // for rms-att-weight
     int row_length = model->hparams.n_embd;
     int n_ff = model->hparams.n_ff;
+
     const uint32_t n_multiqueries = model->hparams.n_head_kv <= 0 || model->hparams.n_head_kv >= model->hparams.n_head ? 1 : model->hparams.n_head / model->hparams.n_head_kv;
     for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
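
For context (also unchanged here), n_multiqueries is the grouped-query-attention ratio: the number of query heads that share one key/value head, falling back to 1 (plain multi-head attention) when n_head_kv is zero or not smaller than n_head. A hypothetical helper expressing the same ternary:

    #include <cstdint>
    #include <cassert>

    // Hypothetical helper, mirroring the ternary in the hunk above.
    static uint32_t kv_group_size(uint32_t n_head, uint32_t n_head_kv) {
        if (n_head_kv == 0 || n_head_kv >= n_head) {
            return 1; // regular multi-head attention
        }
        return n_head / n_head_kv; // query heads per KV head (GQA/MQA)
    }

    int main() {
        assert(kv_group_size(32,  0) == 1); // no KV head count set -> MHA
        assert(kv_group_size(32, 32) == 1); // one KV head per query head -> MHA
        assert(kv_group_size(32,  8) == 4); // 4 query heads share a KV head -> GQA
    }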
@@ -879,15 +881,24 @@ int main(int argc, char ** argv) {
     {
         LOG("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
         FILE *file = fopen(params.fn_llama2c_model, "r");
-        if (!file) { LOG("%s: Unable to open the checkpoint file %s!\n",__func__,params.fn_llama2c_model); return 1; }
+        if (!file) {
+            LOG("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
+            return 1;
+        }
         // read in the config header
-        if (fread(&config, sizeof(Config), 1, file) != 1) { LOG("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model); return 1; }
+        if (fread(&config, sizeof(Config), 1, file) != 1) {
+            LOG("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
+            return 1;
+        }
         auto shared_weights = config.vocab_size > 0;
         config.vocab_size = abs(config.vocab_size);
         // read in the Transformer weights
         alloc_weights(&weights, &config, shared_weights);
-        if(checkpoint_init_weights(&weights, &config, file, shared_weights)) { LOG("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model); return 1; }
+        if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
+            LOG("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
+            return 1;
+        }
         fclose(file);
     }
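
For reference, the Config being fread here is the checkpoint header from Karpathy's llama2.c, and the sign trick on vocab_size follows the convention of llama2.c's run.c: a positive value means the output classifier shares weights with the token embedding, a negative one means it does not, which is why the code records the sign and then takes abs(). The header layout, as defined in llama2.c:

    // Checkpoint header as defined in llama2.c (run.c): seven 32-bit ints.
    typedef struct {
        int dim;        // transformer embedding dimension
        int hidden_dim; // FFN hidden dimension
        int n_layers;   // number of transformer layers
        int n_heads;    // number of query heads
        int n_kv_heads; // number of key/value heads (can be < n_heads)
        int vocab_size; // vocabulary size; negative signals unshared classifier weights
        int seq_len;    // maximum sequence length
    } Config;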
@@ -904,7 +915,9 @@ int main(int argc, char ** argv) {
     model.hparams.n_head_kv = config.n_kv_heads;
     model.hparams.n_layer = config.n_layers; //params.n_layer;
     model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head);
+
     print_params(&model.hparams);
+
     struct ggml_init_params lcparams;
     lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
     lcparams.mem_buffer = NULL;
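
The lcparams block feeds ggml_init, which reserves a single arena up front; mem_size is params.mem_model_gb expressed in bytes (GiB * 1024^3). A minimal sketch of that lifecycle, assuming the standard ggml C API of this era:

    #include "ggml.h"

    int main() {
        struct ggml_init_params lcparams;
        lcparams.mem_size   = 1024ll*1024ll*1024ll*((size_t) 2); // hypothetical 2 GiB arena
        lcparams.mem_buffer = NULL;  // let ggml allocate the arena itself
        lcparams.no_alloc   = false; // tensors get storage inside the arena

        struct ggml_context * ctx = ggml_init(lcparams);
        if (!ctx) {
            return 1; // arena allocation failed
        }
        // ... build tensors / graphs against ctx here ...
        ggml_free(ctx);
        return 0;
    }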