'''
This is an example on how to use the API for oobabooga/text-generation-webui.

Make sure to start the web UI with the following flags:

python server.py --model MODEL --listen --no-stream

Optionally, you can also add the --share flag to generate a public gradio URL,
allowing you to use the API remotely.
'''

import json

import requests

# Server address
server = "127.0.0.1"

# Generation parameters
# Reference: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig
params = {
    'max_new_tokens': 200,
    'do_sample': True,
    'temperature': 0.5,
    'top_p': 0.9,
    'typical_p': 1,
    'repetition_penalty': 1.05,
    'encoder_repetition_penalty': 1.0,
    'top_k': 0,
    'min_length': 0,
    'no_repeat_ngram_size': 0,
    'num_beams': 1,
    'penalty_alpha': 0,
    'length_penalty': 1,
    'early_stopping': False,
    'seed': -1,
}

# Input prompt
prompt = "What I would like to say is the following: "

# The gradio endpoint expects the prompt and the parameters packed together
# as a single JSON-encoded string in the first "data" slot.
payload = json.dumps([prompt, params])

# A timeout keeps the script from hanging forever if the server is down or
# unreachable: 10 s to connect, a generous 300 s for generation to finish.
http_response = requests.post(
    f"http://{server}:7860/run/textgen",
    json={
        "data": [
            payload
        ]
    },
    timeout=(10, 300),
)
# Fail loudly on HTTP errors instead of raising a confusing KeyError below.
http_response.raise_for_status()
response = http_response.json()

reply = response["data"][0]
print(reply)