mirror of https://github.com/ggerganov/llama.cpp.git
gguf : rm redundant method
commit f44bbd3d88
parent 7009cf581c
@@ -626,7 +626,6 @@ struct gguf_file_saver {
         fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
         write_header();
         write_hparams(new_ftype);
-        write_vocab();
     }
 
     // TODO: probably it's better to move these to gguf_file
@@ -744,10 +743,6 @@ struct gguf_file_saver {
 
     }
 
-    void write_vocab() {
-        uint32_t n_vocab = fl->hparams.n_vocab;
-        GGML_UNUSED(n_vocab);
-    }
 
     void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
         GGML_UNUSED(tensor);
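
For context, the removed write_vocab() only loaded n_vocab and handed it to GGML_UNUSED, so it performed no I/O and had no side effects; deleting the method and its single call site leaves the saved file unchanged. Below is a minimal, self-contained C++ sketch of that pattern. The stub struct and field names are stand-ins for illustration, not the real gguf_file_saver; only the shape of the removed method is taken from the diff above.

#include <cstdint>
#include <cstdio>

// ggml.h silences unused-variable warnings with a void cast.
#define GGML_UNUSED(x) (void)(x)

struct hparams_stub {            // stand-in for the real hyperparameters
    uint32_t n_vocab = 32000;
};

struct file_saver_stub {         // stand-in for gguf_file_saver
    hparams_stub hparams;

    // Same shape as the method removed by this commit: it reads a value
    // and immediately discards it, so it is a no-op.
    void write_vocab() {
        uint32_t n_vocab = hparams.n_vocab;
        GGML_UNUSED(n_vocab);
    }
};

int main() {
    file_saver_stub saver;
    saver.write_vocab();         // calling it has no observable effect
    std::printf("write_vocab() is a no-op, so it is safe to remove\n");
    return 0;
}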