mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-10-31 23:28:51 +01:00
438c2ca830
* implementing parallel decoding in server example
* crash fixed
* save dev progress
* refactored sampling function
* completion endpoint working
* multiple client support
* grammar + no stream completion
* cached prompt support
* chat.mjs support cached prompt + some fixes
* server ui now support multiple clients
* unused change reverted
* fixed timings per slot
* add context swap
* add changes to README.md
* llava multimodal integration
* fixed tokens probs
* add multimodal input - alfa
* refactor code + remove unused comments + improved README.md
* fix compilation errors with llvm
* notify the user from server ui that multimodality is unavialable
* some ci fixes
* fix ci make build undefined ref errors
* fix long prompt than ctx proposed in #3639
* fixed premature end due stop word
* context shift fixed
* fix llava implementation
* sync README.md changes
* readme change
* update api like OpenAI
* multimodal support enabled by default
* fix make bui;d errors
* fix multiple clients
* fix zig build
* new sampling API
* latest changes of sampling API
* server : coding-style normalization
* server : coding-style normalization (part 2)
* server : remove beam-search functionality
* server : bug fix in ingest_images n_tokens is incremented internally by llama_batch_add
* server : use refs + use llama_batch_clear()
* server : snake case
* server : minor sync
* added thread safe pipeline
* server : bach has to be allocated for n_parallel sequences
* server : no need for atomic int - already using mutex
* server : logs + minor code style
* server : fix multibyte handle in partial response (#3706)
* fix image load + view image in chat
* make : silence stb warnings
* clip : link to ggml, not to llama
* server : fix switch fallthrough
* server : fix crash in Debug on macOS (I have no idea why this fixes it!?)
* server : refactor ctx_sampling init + n_ctx + names
* server : bug fix for prompt caching
* Do not save/load image_data to localStorage
* editorconfig : new line in index.html
* server : completion requests remember slot_id
* Update readme to document multimodal in server
* server : minor style
* Update readme to document multimodal in server
* server : hide ctx_sampling->prev behind API (#3696)
* server : apply fix from #3722
* server : fix slot reuse
* server : add comment about changing slot_state to bool

---------

Co-authored-by: FSSRepo <go778sgt@gmail.com>
Co-authored-by: Damian Stewart <d@damianstewart.com>
Co-authored-by: Steward Garcia <57494570+FSSRepo@users.noreply.github.com>
Co-authored-by: Jhen-Jie Hong <iainst0409@gmail.com>
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
131 lines
3.7 KiB
JavaScript
import * as readline from 'node:readline'
import { stdin, stdout } from 'node:process'
import { readFileSync } from 'node:fs'
import { SchemaConverter } from './public/json-schema-to-grammar.mjs'

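// Parse "--flag value" pairs by looking one position back in argv, e.g.
// (file names below are only examples, assuming this script is the server's chat.mjs):
//   node chat.mjs --grammar-json-schema schema.json --grammar-json-schema-prop-order function,arguments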
const args = process.argv.slice(2);
const grammarJsonSchemaFile = args.find(
    (_, index) => args[index - 1] === "--grammar-json-schema"
);

const no_cached_prompt = args.find(
    (_, index) => args[index - 1] === "--no-cache-prompt"
) ?? "false";

const grammarFile = args.find((_, index) => args[index - 1] === "--grammar");

// Property order for the JSON-schema conversion, passed as a comma-separated
// list, e.g. "function,arguments"
const grammarJsonSchemaPropOrder = args.find(
    (_, index) => args[index - 1] === "--grammar-json-schema-prop-order"
);
const propOrder = grammarJsonSchemaPropOrder
    ? grammarJsonSchemaPropOrder
        .split(",")
        .reduce((acc, cur, index) => ({ ...acc, [cur]: index }), {})
    : {};

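// Build a GBNF grammar: either converted client-side from a JSON schema,
// or read verbatim from a grammar file.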
let grammar = null
if (grammarJsonSchemaFile) {
    const schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8'))
    const converter = new SchemaConverter(propOrder)
    converter.visit(schema, '')
    grammar = converter.formatGrammar()
}
if (grammarFile) {
    grammar = readFileSync(grammarFile, 'utf-8')
}

// Slot id used for prompt caching: -1 lets the server pick a slot on the
// first request; streamed responses report which slot was used so it can be
// reused (together with cache_prompt) on subsequent turns.
let slot_id = -1;

const API_URL = 'http://127.0.0.1:8080'

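// Seed conversation included as few-shot context in every prompt.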
const chat = [
    {
        human: "Hello, Assistant.",
        assistant: "Hello. How may I help you today?"
    },
    {
        human: "Please tell me the largest city in Europe.",
        assistant: "Sure. The largest city in Europe is Moscow, the capital of Russia."
    },
]

const instruction = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.`

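// Assemble the full prompt: instruction, all prior turns, then the new question.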
function format_prompt(question) {
    return `${instruction}\n${
        chat.map(m => `### Human: ${m.human}\n### Assistant: ${m.assistant}`).join("\n")
    }\n### Human: ${question}\n### Assistant:`
}

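// Tokenize text via the server's /tokenize endpoint (used to compute n_keep below).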
async function tokenize(content) {
    const result = await fetch(`${API_URL}/tokenize`, {
        method: 'POST',
        body: JSON.stringify({ content })
    })

    if (!result.ok) {
        return []
    }

    // result.json() returns a promise, so await it before reading .tokens
    return (await result.json()).tokens
}

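// Number of prompt tokens the server keeps when the context fills up and is
// shifted; keeping the instruction's tokens preserves the system prompt.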
const n_keep = (await tokenize(instruction)).length

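// Send a streaming /completion request and print tokens as they arrive.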
async function chat_completion(question) {
    const result = await fetch(`${API_URL}/completion`, {
        method: 'POST',
        body: JSON.stringify({
            prompt: format_prompt(question),
            temperature: 0.2,
            top_k: 40,
            top_p: 0.9,
            n_keep: n_keep,
            n_predict: 256,
            cache_prompt: no_cached_prompt === "false",
            slot_id: slot_id,
            stop: ["\n### Human:"], // stop completion after generating this
            grammar,
            stream: true,
        })
    })

    if (!result.ok) {
        return
    }

    let answer = ''

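    // The body is a server-sent event stream: lines of the form "data: {json}".
    // This simple parser assumes each chunk holds complete "data:" lines.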
    for await (const chunk of result.body) {
        const t = Buffer.from(chunk).toString('utf8')
        if (t.startsWith('data: ')) {
            const message = JSON.parse(t.substring(6))
            slot_id = message.slot_id
            answer += message.content
            process.stdout.write(message.content)
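            // message.stop marks the end of generation; if the context was
            // truncated server-side, drop the oldest turn so the next prompt fits.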
            if (message.stop) {
                if (message.truncated) {
                    chat.shift()
                }
                break
            }
        }
    }

    process.stdout.write('\n')
    chat.push({ human: question, assistant: answer.trimStart() })
}

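// Simple REPL: read a question from stdin, stream the answer, repeat.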
const rl = readline.createInterface({ input: stdin, output: stdout });

const readlineQuestion = (rl, query, options) => new Promise((resolve, reject) => {
    rl.question(query, options, resolve)
});

while (true) {
    const question = await readlineQuestion(rl, '> ')
    await chat_completion(question)
}