Mirror of https://github.com/ggerganov/llama.cpp.git
convert.py : fix llama/llama2 conversion due to vocab_size=-1 (#4258)
commit f4d973cecb
parent 954e22858c
@@ -267,7 +267,7 @@ class Params:
         n_ctx = 2048
 
         return Params(
-            n_vocab = config.get("vocab_size", model["tok_embeddings.weight"].shape[0]),
+            n_vocab = model["tok_embeddings.weight"].shape[0],
             n_embd  = config["dim"],
             n_layer = config["n_layers"],
             n_ctx   = n_ctx,
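The root cause: the params.json shipped with some llama/llama2 checkpoints sets "vocab_size": -1, so config.get("vocab_size", fallback) returns -1 instead of falling back, and the converter ends up with a negative vocabulary size. Taking the first dimension of the token-embedding tensor sidesteps the bogus value. A minimal sketch of the difference, with an illustrative config dict and a stand-in for the tensor shape (not real checkpoint data):

# Illustrative stand-ins: a params.json-style dict with the problematic
# "vocab_size": -1, and a placeholder for model["tok_embeddings.weight"].shape[0].
config = {"dim": 4096, "n_layers": 32, "vocab_size": -1}
embedding_rows = 32000  # first dimension of the token-embedding tensor

# Old behavior: dict.get() only uses the default when the key is *missing*,
# so an explicit -1 is returned as-is and n_vocab goes negative.
n_vocab_old = config.get("vocab_size", embedding_rows)
assert n_vocab_old == -1

# New behavior: read the vocab size straight from the embedding tensor,
# which is correct regardless of what params.json claims.
n_vocab_new = embedding_rows
assert n_vocab_new == 32000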