# Usage:
#! ./llama-server -m some-model.gguf &
#! pip install pydantic
#! python json-schema-pydantic-example.py

from pydantic import BaseModel, TypeAdapter
from annotated_types import MinLen
from typing import Annotated, List, Optional
import json, requests

# Flip to False to use the Instructor-based variant in the else branch below.
if True:
    def create_completion(*, response_model=None, endpoint="http://localhost:8080/v1/chat/completions", messages, **kwargs):
        '''
        Creates a chat completion using an OpenAI-compatible endpoint w/ JSON schema support
        (llama.cpp server, llama-cpp-python, Anyscale / Together...)

        The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below)
        '''
        # Default to a plain, unconstrained completion when no response_model is given.
        type_adapter = None
        response_format = None

        if response_model:
            # Derive a JSON schema from the requested type, announce it in a system
            # message, and ask the server to constrain its output to it.
            type_adapter = TypeAdapter(response_model)
            schema = type_adapter.json_schema()
            messages = [{
                "role": "system",
                "content": f"You respond in JSON format with the following schema: {json.dumps(schema, indent=2)}"
            }] + messages
            response_format = {"type": "json_object", "schema": schema}

        data = requests.post(endpoint, headers={"Content-Type": "application/json"},
                             json=dict(messages=messages, response_format=response_format, **kwargs)).json()
        if 'error' in data:
            raise Exception(data['error']['message'])

        content = data["choices"][0]["message"]["content"]
        return type_adapter.validate_json(content) if type_adapter else content
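
    # Side note: TypeAdapter also derives schemas from plain annotations, which
    # is what lets non-Pydantic types work as response_model above, e.g.:
    #
    #   TypeAdapter(List[int]).json_schema()
    #   # -> {'items': {'type': 'integer'}, 'type': 'array'}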
else:

    # This alternative branch uses Instructor + the OpenAI client lib.
    # Instructor supports streamed iterable responses, retries & more.
    # (see https://python.useinstructor.com/)
    #! pip install instructor openai
    import instructor, openai
    client = instructor.patch(
        openai.OpenAI(api_key="123", base_url="http://localhost:8080"),
        mode=instructor.Mode.JSON_SCHEMA)
    create_completion = client.chat.completions.create
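
# A minimal usage sketch (assuming a local llama-server is running): since
# response_model takes any type TypeAdapter understands, bare annotations work
# too, not just BaseModel subclasses:
#
#   primes = create_completion(
#       model="...",
#       response_model=List[int],
#       messages=[{"role": "user", "content": "List the first five primes"}])
#
# primes is then a validated List[int].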

if __name__ == '__main__':

    class QAPair(BaseModel):
        question: str
        concise_answer: str
        justification: str

    class PyramidalSummary(BaseModel):
        title: str
        summary: str
        question_answers: Annotated[List[QAPair], MinLen(2)]
        sub_sections: Optional[Annotated[List['PyramidalSummary'], MinLen(2)]]

    print("# Summary\n", create_completion(
        model="...",
        response_model=PyramidalSummary,
        messages=[{
            "role": "user",
            "content": """
                You are a highly efficient corporate document summarizer.
                Create a pyramidal summary of an imaginary internal document about our company processes
                (starting high-level, going down to each sub-section).
                Keep questions short, and answers even shorter (trivia / quiz style).
            """
        }]))
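
    # A hedged follow-up sketch: the call above returns a validated
    # PyramidalSummary instance, so regular Pydantic v2 tooling applies to the
    # result, e.g.:
    #
    #   summary = create_completion(model="...", response_model=PyramidalSummary,
    #                               messages=[...])
    #   print(summary.model_dump_json(indent=2))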