mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 13:58:46 +01:00
use weights_only in conversion script (#32)
This prevents malicious weight files from executing arbitrary code by restricting the unpickler to loading only tensors, primitive types, and dictionaries.
This commit is contained in:
parent
6a9a67f0be
commit
a93120236f
@ -86,7 +86,7 @@ for p in range(n_parts):
|
|||||||
if (p > 0):
|
if (p > 0):
|
||||||
fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)
|
fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)
|
||||||
|
|
||||||
model = torch.load(fname_model, map_location="cpu")
|
model = torch.load(fname_model, map_location="cpu", weights_only=True)
|
||||||
|
|
||||||
fout = open(fname_out, "wb")
|
fout = open(fname_out, "wb")
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user