Fix baichuan convert script not detecting model (#3739)
It seems nobody objects.
This commit is contained in:
parent 96981f37b1
commit 6336701c93
@@ -110,7 +110,7 @@ print("gguf: loading model "+dir_model.name)
 with open(dir_model / "config.json", "r", encoding="utf-8") as f:
     hparams = json.load(f)
 print("hello print: ",hparams["architectures"][0])
-if hparams["architectures"][0] != "BaichuanForCausalLM":
+if hparams["architectures"][0] != "BaichuanForCausalLM" and hparams["architectures"][0] != "BaiChuanForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0])

     sys.exit()
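For context: some Baichuan checkpoints ship a config.json whose architecture field is spelled "BaiChuanForCausalLM" (capital C), while others use "BaichuanForCausalLM", so the converter's original exact-match check rejected otherwise valid models. The sketch below is a minimal, hypothetical rewrite of that check using set membership; the helper name check_architecture and the SUPPORTED_ARCHITECTURES set are illustrations, not part of the convert script. It accepts both spellings and is easier to extend if further variants turn up.

import json
import sys
from pathlib import Path

# Both capitalizations observed in Baichuan config.json files.
SUPPORTED_ARCHITECTURES = {"BaichuanForCausalLM", "BaiChuanForCausalLM"}

def check_architecture(dir_model: Path) -> dict:
    # Load the model's config.json and abort if the reported architecture
    # is not one the converter knows how to handle.
    with open(dir_model / "config.json", "r", encoding="utf-8") as f:
        hparams = json.load(f)
    arch = hparams["architectures"][0]
    if arch not in SUPPORTED_ARCHITECTURES:
        print("Model architecture not supported: " + arch)
        sys.exit()
    return hparams

Such a helper would be called once near the top of the script, e.g. hparams = check_architecture(dir_model), before any tensor conversion begins.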