'''

Contributed by SagsMug. Thank you SagsMug.

https://github.com/oobabooga/text-generation-webui/pull/175

'''
import asyncio
import json
import random
import string

import websockets

# Gradio changes this index from time to time. To rediscover it, set VISIBLE = False in
# modules/api.py and use the dev tools to inspect the request made after clicking on the
# button called "Run" at the bottom of the UI
GRADIO_FN = 34
def random_hash():
    """Return a random 9-character session id drawn from lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=9))
|
2023-03-06 12:13:50 +01:00
|
|
|
|
2023-04-07 05:15:45 +02:00
|
|
|
|
2023-03-06 12:13:50 +01:00
|
|
|
async def run(context):
    """Stream generated text for ``context`` from a local text-generation-webui
    instance through its Gradio websocket queue.

    Yields every intermediate output string and finally the completed one,
    then closes the connection.
    """
    server = "127.0.0.1"
    # Generation parameters forwarded verbatim to the server.
    params = {
        'max_new_tokens': 200,
        'do_sample': True,
        'temperature': 0.5,
        'top_p': 0.9,
        'typical_p': 1,
        'repetition_penalty': 1.05,
        'encoder_repetition_penalty': 1.0,
        'top_k': 0,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': False,
        'seed': -1,
        'add_bos_token': True,
        'custom_stopping_strings': '',
        'truncation_length': 2048,
        'ban_eos_token': False,
        'skip_special_tokens': True,
    }
    payload = json.dumps([context, params])
    session = random_hash()

    async with websockets.connect(f"ws://{server}:7860/queue/join") as websocket:
        while content := json.loads(await websocket.recv()):
            msg = content["msg"]
            if msg == "send_hash":
                # Identify ourselves and claim a slot in the Gradio queue.
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": GRADIO_FN
                }))
            elif msg == "send_data":
                # The server is ready: submit the actual generation request.
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": GRADIO_FN,
                    "data": [
                        payload
                    ]
                }))
            elif msg == "process_generating" or msg == "process_completed":
                yield content["output"]["data"][0]
                # You can search for your desired end indicator and
                # stop generation by closing the websocket here
                if msg == "process_completed":
                    break
            # "estimation" and "process_starts" messages are informational only.
|
# Example prompt; the model streams its continuation of this text.
prompt = "What I would like to say is the following: "
async def get_result():
    """Consume the stream from run(prompt), printing each update.

    Fix: the original re-used the loop variable after the ``async for``,
    which raises ``NameError`` when the generator yields nothing. The
    variable is now pre-initialized and the final print is guarded.
    """
    response = None
    async for response in run(prompt):
        # Print intermediate steps
        print(response)

    # Print final result (only if at least one chunk was received)
    if response is not None:
        print(response)
# Entry point: drive the async streaming example to completion.
asyncio.run(get_result())