add support for T5 in swift example
commit 715682d21a
parent f0678c5ff4
@@ -137,6 +137,18 @@ actor LlamaContext {
             let i = Int(i1)
             llama_batch_add(&batch, tokens_list[i], Int32(i), [0], false)
         }
+        if llama_model_has_encoder(model) {
+            if (llama_encode(context, batch)) != 0 {
+                print("llama_encode() failed")
+            }
+
+            var decoder_start_token_id = llama_model_decoder_start_token(model)
+            if decoder_start_token_id == -1 {
+                decoder_start_token_id = llama_token_bos(model)
+            }
+
+            llama_batch_clear(&batch)
+            llama_batch_add(&batch, decoder_start_token_id, 0, [0], false)
+        }
         batch.logits[Int(batch.n_tokens) - 1] = 1 // true
 
         if llama_decode(context, batch) != 0 {
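For context, this change teaches the example's LlamaContext actor to handle encoder-decoder models such as T5: the prompt batch is first run through the encoder, and the decoder is then seeded with the model's decoder start token (falling back to BOS) before the usual llama_decode call. Below is a minimal sketch of the resulting prompt-processing flow, assuming the batch helpers (llama_batch_clear, llama_batch_add) and the actor's model, context, batch and tokens_list properties that the diff already relies on; the prepare_first_decode name is only for this sketch, and the code illustrates the patched logic rather than copying the file verbatim.

    // Sketch: prompt processing with an optional encoder pass (e.g. T5).
    func prepare_first_decode() {
        llama_batch_clear(&batch)

        // Fill the batch with the prompt tokens; logits are not needed yet.
        for (pos, token) in tokens_list.enumerated() {
            llama_batch_add(&batch, token, Int32(pos), [0], false)
        }

        if llama_model_has_encoder(model) {
            // Encoder-decoder model: run the encoder over the prompt first.
            if llama_encode(context, batch) != 0 {
                print("llama_encode() failed")
            }

            // Start decoding from the model's decoder start token, or BOS if none is set.
            var decoder_start_token_id = llama_model_decoder_start_token(model)
            if decoder_start_token_id == -1 {
                decoder_start_token_id = llama_token_bos(model)
            }

            // Replace the prompt batch with the single decoder start token.
            llama_batch_clear(&batch)
            llama_batch_add(&batch, decoder_start_token_id, 0, [0], false)
        }

        // Request logits only for the last token in the batch.
        batch.logits[Int(batch.n_tokens) - 1] = 1 // true

        if llama_decode(context, batch) != 0 {
            print("llama_decode() failed")
        }
    }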