ggml : increase max tensor name + clean up compiler warnings in train-text (#1988)

* Clean up compiler warnings in train-text

Added parentheses to disambiguate the order of operations in several expressions

* Increase GGML_MAX_NAME

Avoids the strncpy truncation danger in train-text-from-scratch (tensor names one character too long were silently cut and left unterminated) and reduces potential future name-length issues.
This commit is contained in:
David Yang 2023-06-27 03:45:32 +08:00 committed by GitHub
parent aa777abbb7
commit eaa6ca5a61
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 18 deletions

View File

@ -294,20 +294,9 @@ void init_model(struct my_llama_model * model) {
ggml_set_name(layer.ffn_norm, (layers_i + ".ffn_norm.weight").c_str()); ggml_set_name(layer.ffn_norm, (layers_i + ".ffn_norm.weight").c_str());
// 'layers.10.feed_forward.w1.weight' has length of 32. ggml_format_name(layer.w1, "%s.feed_forward.w1.weight", layers_i.c_str());
// ggml_tensor->name only has 32 characters, but we need one more for the '\0' terminator. ggml_format_name(layer.w2, "%s.feed_forward.w2.weight", layers_i.c_str());
// ggml_set_name will set the last character to '\0', so we can only store 'layers.10.feed_forward.w1.weigh'. ggml_format_name(layer.w3, "%s.feed_forward.w3.weight", layers_i.c_str());
// when saving llama compatible model the tensors names will miss a character.
// ggml_set_name(layer.w1, (layers_i + ".feed_forward.w1.weight").c_str());
// ggml_set_name(layer.w2, (layers_i + ".feed_forward.w2.weight").c_str());
// ggml_set_name(layer.w3, (layers_i + ".feed_forward.w3.weight").c_str());
strncpy(layer.w1->name, (layers_i + ".feed_forward.w1.weight").c_str(), sizeof(layer.w1->name));
strncpy(layer.w2->name, (layers_i + ".feed_forward.w2.weight").c_str(), sizeof(layer.w2->name));
strncpy(layer.w3->name, (layers_i + ".feed_forward.w3.weight").c_str(), sizeof(layer.w3->name));
layer.w1->padding[0] = 0;
layer.w2->padding[0] = 0;
layer.w3->padding[0] = 0;
} }
} }
@ -2368,7 +2357,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
file->write_u32(0); file->write_u32(0);
file->write_u32(0); file->write_u32(0);
file->write_u32(GGML_TYPE_F32); file->write_u32(GGML_TYPE_F32);
file->seek(0-file->tell() & 31, SEEK_CUR); file->seek((0-file->tell()) & 31, SEEK_CUR);
return; return;
} }
const char * name = ggml_get_name(tensor); const char * name = ggml_get_name(tensor);
@ -2383,7 +2372,7 @@ void write_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
file->write_u32(tensor->type); file->write_u32(tensor->type);
file->write_raw(ne, sizeof(ne[0]) * nd); file->write_raw(ne, sizeof(ne[0]) * nd);
file->write_raw(name, name_len); file->write_raw(name, name_len);
file->seek(0-file->tell() & 31, SEEK_CUR); file->seek((0-file->tell()) & 31, SEEK_CUR);
file->write_raw(tensor->data, ggml_nbytes(tensor)); file->write_raw(tensor->data, ggml_nbytes(tensor));
} }
@ -2404,7 +2393,7 @@ void read_tensor(struct llama_file * file, struct ggml_tensor * tensor) {
std::string name = file->read_string(name_len); std::string name = file->read_string(name_len);
GGML_ASSERT(strncmp(ggml_get_name(tensor), name.c_str(), sizeof(tensor->name)-1) == 0); GGML_ASSERT(strncmp(ggml_get_name(tensor), name.c_str(), sizeof(tensor->name)-1) == 0);
file->seek(0-file->tell() & 31, SEEK_CUR); file->seek((0-file->tell()) & 31, SEEK_CUR);
file->read_raw(tensor->data, ggml_nbytes(tensor)); file->read_raw(tensor->data, ggml_nbytes(tensor));
} }

2
ggml.h
View File

@ -198,7 +198,7 @@
#define GGML_MAX_PARAMS 256 #define GGML_MAX_PARAMS 256
#define GGML_MAX_CONTEXTS 64 #define GGML_MAX_CONTEXTS 64
#define GGML_MAX_OPT 4 #define GGML_MAX_OPT 4
#define GGML_MAX_NAME 32 #define GGML_MAX_NAME 48
#define GGML_DEFAULT_N_THREADS 4 #define GGML_DEFAULT_N_THREADS 4
#define GGML_ASSERT(x) \ #define GGML_ASSERT(x) \