import base64
from io import BytesIO

import gradio as gr
import torch
from transformers import BlipForConditionalGeneration, BlipProcessor

from modules import chat, shared, ui_chat
from modules.ui import gather_interface_values
from modules.utils import gradio
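
# Checked by chat_input_modifier(): when 'state' is True, the next chat input is
# replaced with the (text, visible_text) pair stored in 'value'.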
input_hijack = {
    'state': False,
    'value': ["", ""]
}
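
# The BLIP captioning model is loaded once at import time and runs on the CPU in float32.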
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float32).to("cpu")


def chat_input_modifier(text, visible_text, state):
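    """Return the hijacked (text, visible_text) pair if a picture was just uploaded; otherwise pass the user input through unchanged."""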
    global input_hijack
    if input_hijack['state']:
        input_hijack['state'] = False
        return input_hijack['value']
    else:
        return text, visible_text


def caption_image(raw_image):
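    """Generate a text caption for a PIL image with the BLIP model."""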
    inputs = processor(raw_image.convert('RGB'), return_tensors="pt").to("cpu", torch.float32)
    out = model.generate(**inputs, max_new_tokens=100)
    return processor.decode(out[0], skip_special_tokens=True)


def generate_chat_picture(picture, name1, name2):
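    """Build the hijack pair for an uploaded picture: the internal text carries the BLIP caption, while the visible text embeds the downscaled image as a base64 <img> tag."""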
    text = f'*{name1} sends {name2} a picture that contains the following: “{caption_image(picture)}”*'
    # Lower the resolution of sent images for the chat; otherwise the log size gets out
    # of control quickly with all the base64 values in the visible history
    picture.thumbnail((300, 300))
    buffer = BytesIO()
    picture.save(buffer, format="JPEG")
    img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
    visible_text = f'<img src="data:image/jpeg;base64,{img_str}" alt="{text}">'
    return text, visible_text


def ui():
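    """Add the picture upload widget and chain its upload event: hijack the next input, gather the interface state, generate a chat reply, and clear the widget."""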
    picture_select = gr.Image(label='Send a picture', type='pil')

    # Prepare the input hijack, update the interface values, call the generation function, and clear the picture
    picture_select.upload(
        lambda picture, name1, name2: input_hijack.update({
            "state": True,
            "value": generate_chat_picture(picture, name1, name2)
        }), [picture_select, shared.gradio['name1'], shared.gradio['name2']], None).then(
        gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
        chat.generate_chat_reply_wrapper, gradio(ui_chat.inputs), gradio('display', 'history'), show_progress=False).then(
        lambda: None, None, picture_select, show_progress=False)