Updated Templates supported by llama_chat_apply_template (markdown)
parent acb78dbbf2
commit 1fdaecff7d
@@ -20,6 +20,7 @@ VARIANTS_TO_TEST = [
     'mlabonne/AlphaMonarch-7B',
     'google/gemma-7b-it',
     'OrionStarAI/Orion-14B-Chat',
+    'openbmb/MiniCPM-2B-dpo-fp32',
 ]
 
 HISTORY = [
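For context, `VARIANTS_TO_TEST` and `HISTORY` belong to the wiki page's small Python helper that renders each model's Hugging Face chat template. The sketch below is an approximation of that helper, not a copy of it: the exact message contents are inferred from the rendered output in the next hunk (`test<用户>hello<AI>response<用户>again<AI>response`), and the `trust_remote_code`/error handling is added here for convenience.

```python
# Approximate sketch of the template-dumping helper (assumptions noted above).
from transformers import AutoTokenizer

VARIANTS_TO_TEST = [
    'mlabonne/AlphaMonarch-7B',
    'google/gemma-7b-it',
    'OrionStarAI/Orion-14B-Chat',
    'openbmb/MiniCPM-2B-dpo-fp32',  # newly added in this commit
]

# Conversation inferred from the rendered MiniCPM output shown below.
HISTORY = [
    {'role': 'system', 'content': 'test'},
    {'role': 'user', 'content': 'hello'},
    {'role': 'assistant', 'content': 'response'},
    {'role': 'user', 'content': 'again'},
    {'role': 'assistant', 'content': 'response'},
]

for variant in VARIANTS_TO_TEST:
    try:
        # Some of these repos ship custom tokenizer code or are gated on Hugging Face.
        tokenizer = AutoTokenizer.from_pretrained(variant, trust_remote_code=True)
        formatted = tokenizer.apply_chat_template(HISTORY, tokenize=False)
    except Exception as exc:  # e.g. templates that reject the system role
        formatted = f'(failed: {exc})'
    print(f'----- {variant} -----\n{formatted}')
```

Running it for the new entry is what produces the MiniCPM-formatted prompt shown in the next hunk.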
@@ -125,6 +126,11 @@ Assistant: </s>response</s>Human: again
 Assistant: </s>response</s>
 ```
 
+```
+Usage: ./server -m ... --chat-template minicpm
+test<用户>hello<AI>response<用户>again<AI>response
+```
+
 Additionally, we also support the zephyr template (I cannot find it on huggingface, but have seen it in [this list](https://github.com/ggerganov/llama.cpp/blob/c8d847d57efdc0f9bbbf881d48c645e151b36fd8/examples/server/public/promptFormats.js))
 
 ```
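As a quick check that the `--chat-template minicpm` override from the hunk above takes effect, you can send an OpenAI-style request to the running server and inspect the reply; the formatting happens server-side. This is only a sketch under assumptions: port 8080 and the `/v1/chat/completions` path are what I understand the server's defaults to be, and the model path in the comment is a placeholder.

```python
# Sketch: query a llama.cpp server started as
#   ./server -m <model.gguf> --chat-template minicpm
# Port 8080 and the /v1/chat/completions path are assumed defaults.
import json
import urllib.request

payload = {
    'messages': [
        {'role': 'system', 'content': 'test'},
        {'role': 'user', 'content': 'hello'},
    ],
}
req = urllib.request.Request(
    'http://localhost:8080/v1/chat/completions',
    data=json.dumps(payload).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
)
with urllib.request.urlopen(req) as resp:
    reply = json.load(resp)
print(reply['choices'][0]['message']['content'])
```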