mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 14:20:31 +01:00)

minor : spacing

commit 9c5fd6be14 (parent fc4c2a6fc3)
@@ -111,14 +111,14 @@ static void usage(const char * executable) {
     exit(1);
 }
 
-static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
+static void load_imatrix(const std::string & imatrix_file, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
     std::ifstream in(imatrix_file.c_str(), std::ios::binary);
     if (!in) {
-        printf("%s: failed to open %s\n",__func__,imatrix_file.c_str());
+        printf("%s: failed to open %s\n", __func__, imatrix_file.c_str());
         return;
     }
     int n_entries;
-    in.read((char*)&n_entries, sizeof(n_entries));
+    in.read((char *)&n_entries, sizeof(n_entries));
     if (in.fail() || n_entries < 1) {
         printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
         return;
@@ -128,25 +128,25 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
         std::vector<char> name_as_vec(len+1);
         in.read((char *)name_as_vec.data(), len);
         if (in.fail()) {
-            printf("%s: failed reading name for entry %d from %s\n",__func__,i+1,imatrix_file.c_str());
+            printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file.c_str());
             return;
         }
         name_as_vec[len] = 0;
         std::string name{name_as_vec.data()};
-        auto& e = imatrix_data[std::move(name)];
+        auto & e = imatrix_data[std::move(name)];
         int ncall;
-        in.read((char*)&ncall, sizeof(ncall));
+        in.read((char *)&ncall, sizeof(ncall));
         int nval;
         in.read((char *)&nval, sizeof(nval));
         if (in.fail() || nval < 1) {
-            printf("%s: failed reading number of values for entry %d\n",__func__,i);
+            printf("%s: failed reading number of values for entry %d\n", __func__, i);
             imatrix_data = {};
             return;
         }
         e.resize(nval);
-        in.read((char*)e.data(), nval*sizeof(float));
+        in.read((char *)e.data(), nval*sizeof(float));
         if (in.fail()) {
-            printf("%s: failed reading data for entry %d\n",__func__,i);
+            printf("%s: failed reading data for entry %d\n", __func__, i);
             imatrix_data = {};
             return;
         }
@@ -154,13 +154,13 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
             for (auto& v : e) v /= ncall;
         }
     }
-    printf("%s: loaded %d importance matrix entries from %s\n",__func__,int(imatrix_data.size()),imatrix_file.c_str());
+    printf("%s: loaded %d importance matrix entries from %s\n", __func__, int(imatrix_data.size()), imatrix_file.c_str());
 }
 
-static void prepare_imatrix(const std::string& imatrix_file,
-        const std::vector<std::string>& included_weights,
-        const std::vector<std::string>& excluded_weights,
-        std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
+static void prepare_imatrix(const std::string & imatrix_file,
+        const std::vector<std::string> & included_weights,
+        const std::vector<std::string> & excluded_weights,
+        std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
     if (!imatrix_file.empty()) {
         load_imatrix(imatrix_file, imatrix_data);
     }
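
For context, the reader above implies a simple binary layout for the imatrix file: an int32 entry count, then per entry an int32 name length, the raw name bytes (no terminating NUL), an int32 call count, an int32 value count, and that many floats; since load_imatrix() divides each value by ncall, the stored values are evidently accumulated sums. The following is a minimal writer sketch matching that inferred layout. The helper name is hypothetical and the layout is deduced from this diff's reader, not taken from a canonical imatrix specification.

#include <fstream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical writer for the layout that load_imatrix() above reads.
// Assumes values in `data` are accumulated sums over `ncall` calls.
static void save_imatrix_sketch(const std::string & fname,
        const std::unordered_map<std::string, std::vector<float>> & data,
        int ncall) {
    std::ofstream out(fname.c_str(), std::ios::binary);
    const int n_entries = (int) data.size();
    out.write((const char *) &n_entries, sizeof(n_entries));   // int32 entry count
    for (const auto & kv : data) {
        const int len = (int) kv.first.size();
        out.write((const char *) &len, sizeof(len));           // int32 name length
        out.write(kv.first.data(), len);                       // name bytes, no NUL
        out.write((const char *) &ncall, sizeof(ncall));       // int32 call count
        const int nval = (int) kv.second.size();
        out.write((const char *) &nval, sizeof(nval));         // int32 value count
        out.write((const char *) kv.second.data(), nval * sizeof(float));
    }
}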
@@ -205,7 +205,7 @@ static ggml_type parse_ggml_type(const char * arg) {
     return result;
 }
 
-static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override>& overrides) {
+static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
     const char* sep = strchr(data, '=');
     if (sep == nullptr || sep - data >= 128) {
         fprintf(stderr, "%s: malformed KV override '%s'\n", __func__, data);
@@ -219,27 +219,22 @@ static bool parse_kv_override(const char * data, std::vector<llama_model_kv_over
         sep += 4;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
         kvo.int_value = std::atol(sep);
-    }
-    else if (strncmp(sep, "float:", 6) == 0) {
+    } else if (strncmp(sep, "float:", 6) == 0) {
         sep += 6;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
         kvo.float_value = std::atof(sep);
-    }
-    else if (strncmp(sep, "bool:", 5) == 0) {
+    } else if (strncmp(sep, "bool:", 5) == 0) {
         sep += 5;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
         if (std::strcmp(sep, "true") == 0) {
             kvo.bool_value = true;
-        }
-        else if (std::strcmp(sep, "false") == 0) {
+        } else if (std::strcmp(sep, "false") == 0) {
             kvo.bool_value = false;
-        }
-        else {
+        } else {
             fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
             return false;
         }
-    }
-    else {
+    } else {
         fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
         return false;
     }
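
From the branches above, the accepted override syntax is evidently key=int:N, key=float:X, or key=bool:true|false, with the key capped at 127 characters before the '='. A hedged usage fragment follows (the keys are hypothetical, and it assumes the elided lines before this hunk match the "int:" prefix, consistent with sep += 4):

// Inside quantize.cpp's main(), with its existing includes:
std::vector<llama_model_kv_override> overrides;
bool ok = parse_kv_override("my.custom.count=int:42",    overrides)
       && parse_kv_override("my.custom.scale=float:0.5", overrides)
       && parse_kv_override("my.custom.flag=bool:true",  overrides);
if (!ok) {
    fprintf(stderr, "malformed KV override\n");   // parse_kv_override() already printed details
}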
@@ -344,8 +339,7 @@ int main(int argc, char ** argv) {
         if (ftype_str == "COPY") {
             params.only_copy = true;
         }
-    }
-    else {
+    } else {
         fname_out = argv[arg_idx];
         arg_idx++;
 
llama.cpp (28 changed lines)
@@ -12809,20 +12809,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     gguf_set_kv     (ctx_out, ml.meta);
     gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
     gguf_set_val_u32(ctx_out, "general.file_type", ftype);
 
     if (params->kv_overrides) {
-        const std::vector<llama_model_kv_override>& overrides = *(const std::vector<llama_model_kv_override>*)params->kv_overrides;
-        for (auto& o : overrides) {
+        const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
+        for (auto & o : overrides) {
             if (o.key[0] == 0) break;
             if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
                 gguf_set_val_f32(ctx_out, o.key, o.float_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                 gguf_set_val_i32(ctx_out, o.key, o.int_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                 gguf_set_val_bool(ctx_out, o.key, o.bool_value);
-            }
-            else {
+            } else {
                 LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
             }
         }
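
The loop above recovers the vector through the opaque params->kv_overrides pointer and stops at the first entry whose key is empty, so the producing side is expected to append a sentinel. A minimal sketch of that side, assuming kv_overrides is the void pointer this cast implies:

std::vector<llama_model_kv_override> kv_overrides;  // filled via parse_kv_override()
kv_overrides.emplace_back();
kv_overrides.back().key[0] = 0;                     // sentinel: empty key ends the scan above

llama_model_quantize_params qparams = llama_model_quantize_default_params();
qparams.kv_overrides = &kv_overrides;               // read back via the cast shown above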
@@ -12836,21 +12834,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
-        }
-        else if (name.find("ffn_down") != std::string::npos) {
+        } else if (name.find("ffn_down") != std::string::npos) {
             ++qs.n_ffn_down;
-        }
-        else if (name.find("ffn_gate") != std::string::npos) {
+        } else if (name.find("ffn_gate") != std::string::npos) {
             ++qs.n_ffn_gate;
-        }
-        else if (name.find("ffn_up") != std::string::npos) {
+        } else if (name.find("ffn_up") != std::string::npos) {
             ++qs.n_ffn_up;
-        }
-        else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
+        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
         }
     }
-    if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
+    if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t) qs.n_attention_wv != model.hparams.n_layer) {
         LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_ffn_down = %d, hparams.n_layer = %d\n",
                 __func__, qs.n_attention_wv, qs.n_ffn_down, model.hparams.n_layer);
     }