mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 05:48:47 +01:00

commit 70bc0b8b15
parent 18ebda34d6

Fix a bug in the rope calculation
@@ -73,7 +73,7 @@ fout.write(struct.pack("i", hparams["dim"]))
 fout.write(struct.pack("i", hparams["multiple_of"]))
 fout.write(struct.pack("i", hparams["n_heads"]))
 fout.write(struct.pack("i", hparams["n_layers"]))
-fout.write(struct.pack("i", 64)) # rot
+fout.write(struct.pack("i", hparams["dim"] // hparams["n_heads"])) # rot (obsolete)
 fout.write(struct.pack("i", ftype))
 
 # Is this correct??
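In words: instead of writing a hard-coded 64 for the number of rotary dimensions, the model conversion script now derives it from the checkpoint as dim // n_heads, the per-head embedding size. For LLaMA-7B (dim = 4096, n_heads = 32) that is 128, so the old constant covered only half of each head. The value is also marked obsolete because main.cpp now recomputes it during evaluation, as the next diff shows.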
main.cpp (8 changed lines)
@@ -400,7 +400,7 @@ bool llama_eval(
     const int n_ctx   = hparams.n_ctx;
     const int n_head  = hparams.n_head;
     const int n_vocab = hparams.n_vocab;
-    const int n_rot   = hparams.n_rot;
+    const int n_rot   = hparams.n_embd/hparams.n_head;
 
     const int d_key = n_embd/n_head;
 
@@ -628,6 +628,9 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
+    // params.prompt = R"(// this function checks if the number n is prime
+    //bool is_prime(int n) {)";
+
     int64_t t_load_us = 0;
 
     gpt_vocab vocab;
@@ -691,7 +694,6 @@ int main(int argc, char ** argv) {
 
         if (i >= embd_inp.size()) {
             // sample next token
-            const int top_k = params.top_k;
             const float top_p = params.top_p;
             const float temp  = params.temp;
 
@@ -702,7 +704,7 @@ int main(int argc, char ** argv) {
             {
                 const int64_t t_start_sample_us = ggml_time_us();
 
-                id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
+                id = llama_sample_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_p, temp, rng);
 
                 t_sample_us += ggml_time_us() - t_start_sample_us;
             }
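Two changes matter in main.cpp. In llama_eval, n_rot is no longer read from the model file but recomputed as n_embd/n_head, so the rotary embedding always covers the full per-head dimension even for models converted with the old script. In main, generation switches from gpt_sample_top_k_top_p to the new llama_sample_top_p, so the local top_k variable goes away (a commented-out debug prompt is also added). For context, a minimal self-contained sketch of the rotation RoPE applies to one head, assuming the standard formulation; this is illustrative only, not the ggml graph code, and rope_head is a hypothetical helper:

#include <cmath>
#include <vector>

// Rotate the first n_rot dimensions of one head's vector x at position p.
// If n_rot is smaller than the head size (e.g. the old hard-coded 64 vs. a
// head size of 128), the tail of the head is left unrotated -- the bug this
// commit fixes by always using n_rot = n_embd/n_head.
void rope_head(std::vector<float> & x, int n_rot, int p) {
    for (int i = 0; i < n_rot; i += 2) {
        const float theta = p*std::pow(10000.0f, -((float) i)/n_rot);
        const float c = std::cos(theta);
        const float s = std::sin(theta);
        const float x0 = x[i];
        const float x1 = x[i + 1];
        x[i]     = x0*c - x1*s;
        x[i + 1] = x0*s + x1*c;
    }
}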
utils.cpp (79 changed lines)
@@ -257,7 +257,7 @@ std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::st
             }
         }
 
-        if (l == 0 && t != 13) {
+        if (l == 0) {
             break;
         }
 
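utils.cpp also drops a tokenizer special case: llama_tokenize apparently carried a leftover exception for token 13 (the newline byte token <0x0A> in the LLaMA vocabulary); the loop now bails out whenever no match is found (l == 0), regardless of the token.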
@@ -367,6 +367,83 @@ gpt_vocab::id gpt_sample_top_k_top_p(
     return logits_id[idx].second;
 }
 
+gpt_vocab::id llama_sample_top_p(
+        const gpt_vocab & vocab,
+        const float * logits,
+        double top_p,
+        double temp,
+        std::mt19937 & rng) {
+    int n_logits = vocab.id_to_token.size();
+
+    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
+    logits_id.reserve(n_logits);
+
+    {
+        const double scale = 1.0/temp;
+        for (int i = 0; i < n_logits; ++i) {
+            logits_id.push_back(std::make_pair(logits[i]*scale, i));
+        }
+    }
+
+    std::sort(
+            logits_id.begin(),
+            logits_id.end(),
+            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
+        return a.first > b.first;
+    });
+
+    double maxl = -INFINITY;
+    for (const auto & kv : logits_id) {
+        maxl = std::max(maxl, kv.first);
+    }
+
+    // compute probs for the top K tokens
+    std::vector<double> probs;
+    probs.reserve(logits_id.size());
+
+    double sum = 0.0;
+    for (const auto & kv : logits_id) {
+        double p = exp(kv.first - maxl);
+        probs.push_back(p);
+        sum += p;
+    }
+
+    // normalize the probs
+    for (auto & p : probs) {
+        p /= sum;
+    }
+
+    if (top_p < 1.0f) {
+        double cumsum = 0.0f;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            cumsum += probs[i];
+            if (cumsum >= top_p) {
+                probs.resize(i + 1);
+                logits_id.resize(i + 1);
+                break;
+            }
+        }
+
+        cumsum = 1.0/cumsum;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            probs[i] *= cumsum;
+        }
+    }
+
+    //printf("\n");
+    //for (int i = 0; i < (int) 10; i++) {
+    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
+    //}
+    //printf("\n\n");
+    //exit(0);
+
+    std::discrete_distribution<> dist(probs.begin(), probs.end());
+    int idx = dist(rng);
+
+    return logits_id[idx].second;
+}
+
+
 size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
     const int nb = k / qk;
     const size_t row_size = nb*(sizeof(float) + sizeof(uint8_t)*qk/2);
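The new llama_sample_top_p is a standard nucleus sampler: scale the logits by 1/temp, softmax them in descending order, keep the smallest prefix of tokens whose cumulative probability reaches top_p, renormalize that prefix, and draw from it with std::discrete_distribution. A self-contained toy version of the same idea, runnable on its own (an illustrative sketch, not the utils.cpp code; sample_top_p is a made-up name):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

// Toy nucleus (top-p) sampler over raw logits; returns the chosen index.
int sample_top_p(const std::vector<float> & logits, double top_p, double temp, std::mt19937 & rng) {
    // temperature-scale each logit and pair it with its token index
    std::vector<std::pair<double, int>> lid;
    for (int i = 0; i < (int) logits.size(); ++i) {
        lid.push_back(std::make_pair(logits[i]/temp, i));
    }
    std::sort(lid.begin(), lid.end(),
            [](const std::pair<double, int> & a, const std::pair<double, int> & b) {
        return a.first > b.first;
    });

    // softmax (subtract the max for numerical stability)
    const double maxl = lid.front().first;
    std::vector<double> probs;
    double sum = 0.0;
    for (const auto & kv : lid) {
        probs.push_back(std::exp(kv.first - maxl));
        sum += probs.back();
    }
    for (auto & p : probs) p /= sum;

    // keep the smallest prefix whose cumulative mass reaches top_p, renormalize
    double cumsum = 0.0;
    for (size_t i = 0; i < probs.size(); ++i) {
        cumsum += probs[i];
        if (cumsum >= top_p) {
            probs.resize(i + 1);
            lid.resize(i + 1);
            break;
        }
    }
    for (auto & p : probs) p /= cumsum;

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    return lid[dist(rng)].second;
}

int main() {
    std::mt19937 rng(42);
    const std::vector<float> logits = {2.0f, 1.0f, 0.5f, -1.0f};
    printf("sampled index: %d\n", sample_top_p(logits, 0.95, 0.80, rng));
    return 0;
}

Unlike gpt_sample_top_k_top_p, there is no fixed top-k cutoff: the candidate set is determined purely by probability mass, so flat distributions keep many candidates and peaked ones keep few.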
utils.h (9 changed lines)
@@ -18,7 +18,7 @@ struct gpt_params {
    int32_t n_predict = 128; // new tokens to predict
 
    // sampling parameters
-   int32_t top_k = 40;
+   int32_t top_k = 40; // unused
    float   top_p = 0.95f;
    float   temp  = 0.80f;
 
@@ -86,6 +86,13 @@ gpt_vocab::id gpt_sample_top_k_top_p(
        double temp,
        std::mt19937 & rng);
 
+gpt_vocab::id llama_sample_top_p(
+        const gpt_vocab & vocab,
+        const float * logits,
+        double top_p,
+        double temp,
+        std::mt19937 & rng);
+
 //
 // Quantization
 //
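The header keeps top_k in gpt_params, presumably so existing command-line handling keeps working, but marks it unused now that sampling goes through llama_sample_top_p; the new declaration mirrors the definition added in utils.cpp above.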