gguf : start implementing gguf_file_saver (WIP)

This commit is contained in:
M. Yusuf Sarıgöz 2023-08-11 09:52:01 +03:00
parent f316b94c7c
commit e7d346c37c
2 changed files with 57 additions and 0 deletions

View File

@ -626,20 +626,32 @@ struct gguf_file_saver {
: file(fname, "wb"), any_file_loader(any_file_loader) { : file(fname, "wb"), any_file_loader(any_file_loader) {
fprintf(stderr, "llama.cpp: saving model to %s\n", fname); fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
write_magic(); write_magic();
write_version();
write_hparams(new_ftype); write_hparams(new_ftype);
write_vocab(); write_vocab();
} }
void write_magic() { void write_magic() {
const int32_t magic = GGUF_MAGIC;
file.write_i32(magic);
} }
void write_version() {
const int32_t version = GGUF_VERSION;
file.write_i32(version);
}
void write_hparams(enum llama_ftype new_ftype) { void write_hparams(enum llama_ftype new_ftype) {
const llama_hparams & hparams = any_file_loader->hparams; const llama_hparams & hparams = any_file_loader->hparams;
GGML_UNUSED(hparams); GGML_UNUSED(hparams);
GGML_UNUSED(new_ftype); GGML_UNUSED(new_ftype);
} }
void write_vocab() { void write_vocab() {
uint32_t n_vocab = any_file_loader->hparams.n_vocab; uint32_t n_vocab = any_file_loader->hparams.n_vocab;
GGML_UNUSED(n_vocab); GGML_UNUSED(n_vocab);
} }
void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
switch (new_type) { switch (new_type) {
case GGML_TYPE_F32: case GGML_TYPE_F32:

View File

@ -15,6 +15,7 @@
#include <climits> #include <climits>
#include <string> #include <string>
#include <sstream>
#include <vector> #include <vector>
#include <stdexcept> #include <stdexcept>
@ -61,6 +62,14 @@ static std::string format(const char * fmt, ...) {
return std::string(buf.data(), size); return std::string(buf.data(), size);
} }
// Stringify any streamable value via operator<< (stand-in for std::to_string,
// which does not cover all the types we need here).
template<typename T>
static std::string to_string(const T & val) {
    std::ostringstream out;
    out << val;
    return out.str();
}
// TODO: can we merge this one and gguf_context? // TODO: can we merge this one and gguf_context?
struct gguf_file { struct gguf_file {
// use FILE * so we don't have to re-open the file to mmap // use FILE * so we don't have to re-open the file to mmap
@ -95,6 +104,42 @@ struct gguf_file {
#endif #endif
GGML_ASSERT(ret == 0); // same GGML_ASSERT(ret == 0); // same
} }
// Serialize a string as a 32-bit length prefix followed by the raw bytes
// (no NUL terminator is written).
void write_str(const std::string & val) {
    const int32_t len = val.size();
    fwrite(&len, sizeof(len), 1, fp);
    fwrite(val.data(), len, 1, fp);
}
// Serialize a single 32-bit integer in host byte order.
void write_i32(int32_t val) {
    fwrite(&val, sizeof(int32_t), 1, fp);
}
// Serialize a size as a fixed-width 64-bit unsigned integer.
// NOTE: the parameter stays size_t for caller compatibility, but we widen to
// uint64_t before writing — sizeof(size_t) is 4 on 32-bit platforms, and the
// on-disk field width must not depend on the build target.
void write_u64(size_t val) {
    const uint64_t v = val;
    fwrite(&v, sizeof(v), 1, fp);
}
// Serialize one key/value pair: length-prefixed key string, then a 32-bit
// type tag, then the raw value bytes.
// The tag is written as int32_t rather than the raw enum: the size of an
// enum type is implementation-defined, and the file layout must be fixed.
// NOTE(review): assumes T is trivially copyable (raw fwrite of &val) — holds
// for the scalar types used so far; confirm before passing anything else.
template<typename T>
void write_val(const std::string & key, enum gguf_type type, const T & val) {
    write_str(key);
    const int32_t t = type;
    fwrite(&t, sizeof(t), 1, fp);
    fwrite(&val, sizeof(val), 1, fp);
}
// Serialize a key/array pair: length-prefixed key, the ARRAY type tag, the
// element type tag, a 32-bit element count, then the packed element bytes.
// Both type tags are written as int32_t instead of the raw enum — the enum's
// underlying size is implementation-defined and the on-disk layout must not
// depend on it.
// NOTE(review): assumes T is trivially copyable (raw fwrite of val.data());
// a std::vector<std::string> here would write object internals, not contents.
template<typename T>
void write_arr(const std::string & key, enum gguf_type type, const std::vector<T> & val) {
    write_str(key);
    const int32_t tarr = GGUF_TYPE_ARRAY;
    fwrite(&tarr, sizeof(tarr), 1, fp);
    const int32_t t = type;
    fwrite(&t, sizeof(t), 1, fp);
    const int32_t n = val.size();
    fwrite(&n, sizeof(n), 1, fp);
    fwrite(val.data(), sizeof(T), n, fp);
}
}; };
#if defined(_WIN32) #if defined(_WIN32)