minicpm working without uhd
commit 8586d23c8a
parent c0d93dd509
@@ -2339,6 +2339,7 @@ class MiniCPMVModel(Qwen2Model):
    model_arch = gguf.MODEL_ARCH.QWEN2
    proj_type: gguf.constants.CLIPProjectorType | None
    resampler_n_embd = 0
    tok_embd_tensor: Tensor | None = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
@@ -2361,6 +2362,8 @@ class MiniCPMVModel(Qwen2Model):
        for tname, tensor in self.get_tensors():
            if tname == "resampler.ln_post.bias":
                self.resampler_n_embd = tensor.shape[0]
            if tname.endswith("embed_tokens.weight"):
                self.tok_embd_tensor = tensor
        if self.resampler_n_embd < 2:
            raise ValueError("Failed to detect resampler embedding size")
        else:
@@ -2372,6 +2375,16 @@ class MiniCPMVModel(Qwen2Model):
        self.hparams["vision_feature_layer"] = 0
        self.v_tensor_map = gguf.get_tensor_name_map(self.vision_arch, self.vparams["num_hidden_layers"])

    def get_embd_of_tokens(self, map_token_to_tensor_name: Iterable[tuple[str, str]]) -> Iterable[tuple[str, Tensor]]:
        if self.tok_embd_tensor is None:
            raise ValueError("Token embedding tensor not found")
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(self.dir_model, trust_remote_code=True)
        for token, tensor_name in map_token_to_tensor_name:
            tok_id = tokenizer.get_vocab()[token]
            row = self.tok_embd_tensor[tok_id]
            yield tensor_name, row

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        # For vision model
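Note (not part of the diff): a minimal standalone sketch of what get_embd_of_tokens() does, assuming a hypothetical checkpoint directory: look up each special token's id and take the matching row of the language model's token-embedding matrix.

# Standalone sketch (not the convert script itself); `model_dir` is a placeholder.
from transformers import AutoModel, AutoTokenizer

model_dir = "path/to/MiniCPM-V"  # hypothetical checkpoint directory
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True)

tok_embd = model.get_input_embeddings().weight   # shape: (n_vocab, n_embd)
for token in ("<image>", "</image>", "<slice>", "</slice>"):
    tok_id = tokenizer.get_vocab()[token]        # the token must already be in the vocab
    row = tok_embd[tok_id]                       # one (n_embd,) embedding row
    print(token, tok_id, tuple(row.shape))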
@@ -2388,6 +2401,14 @@ class MiniCPMVModel(Qwen2Model):
            self.format_tensor_name(gguf.MODEL_TENSOR.V_RESMPL_POS_EMBD_K, is_vision=True),
            torch.from_numpy(self._get_2d_sincos_pos_embed(self.resampler_n_embd, (70, 70)))
        )
        added_tokens = [
            ("<image>",  gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMAGE    ] + ".weight"),
            ("</image>", gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_END_IMAGE] + ".weight"),
            ("<slice>",  gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_SLICE    ] + ".weight"),
            ("</slice>", gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_END_SLICE] + ".weight"),
        ]
        for tensor_name, tensor in self.get_embd_of_tokens(added_tokens):
            yield tensor_name, tensor

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        del bid  # unused
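Note (not part of the diff): _get_2d_sincos_pos_embed itself is not shown in this commit. A common construction that matches the call above, with half of the channels encoding the row index and half the column index (as in the MAE/MiniCPM-V resampler), is sketched below; treat it as an assumption, not the exact helper.

import numpy as np

def get_1d_sincos_pos_embed(embed_dim: int, pos: np.ndarray) -> np.ndarray:
    # embed_dim must be even; pos is any array of positions, flattened below
    omega = np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2.0)
    omega = 1.0 / 10000 ** omega                               # (embed_dim/2,)
    out = np.einsum("m,d->md", pos.reshape(-1), omega)         # (M, embed_dim/2)
    return np.concatenate([np.sin(out), np.cos(out)], axis=1)  # (M, embed_dim)

def get_2d_sincos_pos_embed(embed_dim: int, grid_size: tuple[int, int]) -> np.ndarray:
    # returns (grid_h * grid_w, embed_dim), matching the (70, 70) call above
    grid_h, grid_w = grid_size
    ys, xs = np.meshgrid(np.arange(grid_h, dtype=np.float64),
                         np.arange(grid_w, dtype=np.float64), indexing="ij")
    emb_h = get_1d_sincos_pos_embed(embed_dim // 2, ys)        # first half: row index
    emb_w = get_1d_sincos_pos_embed(embed_dim // 2, xs)        # second half: column index
    return np.concatenate([emb_h, emb_w], axis=1)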
@@ -2404,6 +2425,7 @@ class MiniCPMVModel(Qwen2Model):
            name_k = name.replace("in_proj_", "in_proj_k.")  # in_proj_k.(weight|bias)
            name_v = name.replace("in_proj_", "in_proj_v.")  # in_proj_v.(weight|bias)
            return [
                # TODO: permute these
                (self.map_tensor_name(name_q), split_tensor[0]),
                (self.map_tensor_name(name_k), split_tensor[1]),
                (self.map_tensor_name(name_v), split_tensor[2]),
@@ -2413,6 +2435,9 @@ class MiniCPMVModel(Qwen2Model):
        if name == "resampler.proj" or name == "resampler.query":
            name += ".weight"

        if name.startswith("resampler.proj"):
            data_torch = data_torch.transpose(-1, -2).contiguous()

        if "post_layernorm" in name:
            return []  # skip post_layernorm
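Note (not part of the diff): split_tensor in modify_tensors above is presumably the resampler's fused in_proj_(weight|bias) cut into three equal q/k/v chunks. A rough standalone sketch of that step, assuming a [q; k; v] layout along dim 0 and placeholder shapes:

import torch

# Hypothetical fused attention projection from the resampler checkpoint:
# weight (3*n_embd, n_embd) or bias (3*n_embd,), laid out as [q; k; v] along dim 0.
n_embd = 1024
name = "resampler.attn.in_proj_weight"
data_torch = torch.randn(3 * n_embd, n_embd)

if ".in_proj_" in name:
    split_tensor = data_torch.chunk(3, dim=0)             # -> (q, k, v)
    name_q = name.replace("in_proj_", "in_proj_q.")       # in_proj_q.(weight|bias)
    name_k = name.replace("in_proj_", "in_proj_k.")       # in_proj_k.(weight|bias)
    name_v = name.replace("in_proj_", "in_proj_v.")       # in_proj_v.(weight|bias)
    for new_name, part in zip((name_q, name_k, name_v), split_tensor):
        print(new_name, tuple(part.shape))                # each part: (n_embd, n_embd)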
@@ -100,7 +100,7 @@ int main(int argc, char ** argv) {
    // default prompt for llava 1.5
    //params.prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:<img_placement>\nwhat did you see?\nASSISTANT:";
    // default prompt for minicpmv 2.6
    params.prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nwhat did you see?\n<image><img_placement></image><|im_end|>\n<|im_start|>assistant\n";
    params.prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nwhat did you see?\n<img_placement><|im_end|>\n<|im_start|>assistant\n";
    params.n_predict = 64;
    params.n_batch   = 2048;
    params.n_ubatch  = 1024;
@@ -467,6 +467,10 @@ class MODEL_TENSOR(IntEnum):
    V_RESMPL_Q_NORM      = auto() # minicpmv
    V_RESMPL_PROJ        = auto() # minicpmv
    V_RESMPL_QUERY       = auto() # minicpmv
    V_TOK_EMBD_IMAGE     = auto() # embedding for <image> token
    V_TOK_EMBD_END_IMAGE = auto() # embedding for </image> token
    V_TOK_EMBD_SLICE     = auto() # embedding for <slice> token
    V_TOK_EMBD_END_SLICE = auto() # embedding for </slice> token


MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
@@ -686,6 +690,10 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
    MODEL_TENSOR.V_RESMPL_Q_NORM:      "v.resmpl.q_norm",
    MODEL_TENSOR.V_RESMPL_PROJ:        "v.resmpl.proj",
    MODEL_TENSOR.V_RESMPL_QUERY:       "v.resmpl.query",
    MODEL_TENSOR.V_TOK_EMBD_IMAGE:     "v.tok_embd.image",
    MODEL_TENSOR.V_TOK_EMBD_END_IMAGE: "v.tok_embd.end_image",
    MODEL_TENSOR.V_TOK_EMBD_SLICE:     "v.tok_embd.slice",
    MODEL_TENSOR.V_TOK_EMBD_END_SLICE: "v.tok_embd.end_slice",
}

MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
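Note (not part of the diff): these Python-side names become the GGUF tensor names that the C++ loader requests via tn(LLM_TENSOR_V_TOK_EMBD_*, "weight") further down. With this patch applied to gguf-py, the correspondence can be sanity-checked roughly like this:

import gguf  # assumes the gguf-py package from this tree, with this patch applied

# The converter writes e.g. "v.tok_embd.image.weight"; llama.cpp loads the same
# tensor through tn(LLM_TENSOR_V_TOK_EMBD_IMAGE, "weight").
for t in (gguf.MODEL_TENSOR.V_TOK_EMBD_IMAGE,
          gguf.MODEL_TENSOR.V_TOK_EMBD_END_IMAGE,
          gguf.MODEL_TENSOR.V_TOK_EMBD_SLICE,
          gguf.MODEL_TENSOR.V_TOK_EMBD_END_SLICE):
    print(gguf.TENSOR_NAMES[t] + ".weight")
# expected output:
#   v.tok_embd.image.weight
#   v.tok_embd.end_image.weight
#   v.tok_embd.slice.weight
#   v.tok_embd.end_slice.weight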
@@ -1682,6 +1690,10 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
        MODEL_TENSOR.V_RESMPL_Q_NORM,
        MODEL_TENSOR.V_RESMPL_PROJ,
        MODEL_TENSOR.V_RESMPL_QUERY,
        MODEL_TENSOR.V_TOK_EMBD_IMAGE,
        MODEL_TENSOR.V_TOK_EMBD_END_IMAGE,
        MODEL_TENSOR.V_TOK_EMBD_SLICE,
        MODEL_TENSOR.V_TOK_EMBD_END_SLICE,
    ],
    # TODO
}
@@ -907,6 +907,22 @@ class TensorNameMap:
        MODEL_TENSOR.V_RESMPL_QUERY: (
            "resampler.query",
        ),

        MODEL_TENSOR.V_TOK_EMBD_IMAGE: (
            "v.tok_embd.image", # tensor generated from token embeddings
        ),

        MODEL_TENSOR.V_TOK_EMBD_END_IMAGE: (
            "v.tok_embd.end_image", # tensor generated from token embeddings
        ),

        MODEL_TENSOR.V_TOK_EMBD_SLICE: (
            "v.tok_embd.slice", # tensor generated from token embeddings
        ),

        MODEL_TENSOR.V_TOK_EMBD_END_SLICE: (
            "v.tok_embd.end_slice", # tensor generated from token embeddings
        ),
    }

    # architecture-specific block mappings
@@ -1382,6 +1382,10 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
            { LLM_TENSOR_V_RESMPL_Q_NORM,      "v.resmpl.q_norm" },
            { LLM_TENSOR_V_RESMPL_PROJ,        "v.resmpl.proj" },
            { LLM_TENSOR_V_RESMPL_QUERY,       "v.resmpl.query" },
            { LLM_TENSOR_V_TOK_EMBD_IMAGE,     "v.tok_embd.image" },
            { LLM_TENSOR_V_TOK_EMBD_END_IMAGE, "v.tok_embd.end_image" },
            { LLM_TENSOR_V_TOK_EMBD_SLICE,     "v.tok_embd.slice" },
            { LLM_TENSOR_V_TOK_EMBD_END_SLICE, "v.tok_embd.end_slice" },
        }
    },
    {
@@ -381,6 +381,10 @@ enum llm_tensor {
    LLM_TENSOR_V_RESMPL_Q_NORM,
    LLM_TENSOR_V_RESMPL_PROJ,
    LLM_TENSOR_V_RESMPL_QUERY,
    LLM_TENSOR_V_TOK_EMBD_IMAGE,
    LLM_TENSOR_V_TOK_EMBD_END_IMAGE,
    LLM_TENSOR_V_TOK_EMBD_SLICE,
    LLM_TENSOR_V_TOK_EMBD_END_SLICE,
};

enum llm_tensor_layer {
@@ -3549,6 +3549,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
            vit.mm_model_ln_post_w = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_RESMPL_POST_NORM, "weight"), {rs_n_embd});
            vit.mm_model_ln_post_b = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_RESMPL_POST_NORM, "bias"  ), {rs_n_embd});

            // tok embd
            vit.mm_tok_embd_image     = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_TOK_EMBD_IMAGE,     "weight"), {n_embd});
            vit.mm_tok_embd_end_image = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_TOK_EMBD_END_IMAGE, "weight"), {n_embd});
            vit.mm_tok_embd_slice     = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_TOK_EMBD_SLICE,     "weight"), {n_embd});
            vit.mm_tok_embd_end_slice = ml.create_tensor(ctx_vision, tn(LLM_TENSOR_V_TOK_EMBD_END_SLICE, "weight"), {n_embd});

            for (int i = 0; i < n_vlayer; ++i) {
                auto & layer = vit.layers[i];
@@ -895,6 +895,10 @@ struct llama_vision_graph_builder {
            cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
        }

        // add <image> and </image> token embeddings
        cur = ggml_concat(ctx0, model.mm_tok_embd_image, cur, 1);
        cur = ggml_concat(ctx0, cur, model.mm_tok_embd_end_image, 1);

        ggml_set_name(cur, "output");
        ggml_build_forward_expand(gf, cur);
@@ -129,7 +129,11 @@ struct llama_vision_model {
    struct ggml_tensor * mm_model_ln_post_w = nullptr;
    struct ggml_tensor * mm_model_ln_post_b = nullptr;

    struct ggml_tensor * image_newline = nullptr;

    // special tokens
    struct ggml_tensor * mm_tok_embd_image     = nullptr;
    struct ggml_tensor * mm_tok_embd_end_image = nullptr;
    struct ggml_tensor * mm_tok_embd_slice     = nullptr;
    struct ggml_tensor * mm_tok_embd_end_slice = nullptr;
};

struct llama_vision_context {