mirror of https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-22 08:07:56 +01:00
Move bot_picture.py inside the extension
This commit is contained in:
parent 5ac24b019e
commit 91f5852245
extensions/send_pictures/script.py

@@ -2,13 +2,11 @@ import base64
 from io import BytesIO
 
 import gradio as gr
+import torch
+from transformers import BlipForConditionalGeneration, BlipProcessor
 
 import modules.chat as chat
 import modules.shared as shared
-from modules.bot_picture import caption_image
-
-params = {
-}
 
 # If 'state' is True, will hijack the next chat generation with
 # custom input text
@@ -17,6 +15,14 @@ input_hijack = {
     'value': ["", ""]
 }
 
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float32).to("cpu")
+
+def caption_image(raw_image):
+    inputs = processor(raw_image.convert('RGB'), return_tensors="pt").to("cpu", torch.float32)
+    out = model.generate(**inputs, max_new_tokens=100)
+    return processor.decode(out[0], skip_special_tokens=True)
+
 def generate_chat_picture(picture, name1, name2):
     text = f'*{name1} sends {name2} a picture that contains the following: "{caption_image(picture)}"*'
     buffer = BytesIO()
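
With this commit the BLIP captioner lives entirely inside the extension, so it can be exercised on its own. A minimal sketch of calling the relocated caption_image(), assuming Pillow is installed; the image path is hypothetical:

    from PIL import Image

    # caption_image() as defined above: BLIP base model, CPU, float32
    raw_image = Image.open("test.jpg")   # hypothetical local test image
    print(caption_image(raw_image))      # prints a short text description of the image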
modules/bot_picture.py (deleted)

@@ -1,10 +0,0 @@
-import torch
-from transformers import BlipForConditionalGeneration, BlipProcessor
-
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float32).to("cpu")
-
-def caption_image(raw_image):
-    inputs = processor(raw_image.convert('RGB'), return_tensors="pt").to("cpu", torch.float32)
-    out = model.generate(**inputs, max_new_tokens=100)
-    return processor.decode(out[0], skip_special_tokens=True)
modules/extensions.py

@@ -33,10 +33,11 @@ def apply_extensions(text, typ):
 def create_extensions_block():
     # Updating the default values
     for extension, name in iterator():
-        for param in extension.params:
-            _id = f"{name}-{param}"
-            if _id in shared.settings:
-                extension.params[param] = shared.settings[_id]
+        if hasattr(extension, 'params'):
+            for param in extension.params:
+                _id = f"{name}-{param}"
+                if _id in shared.settings:
+                    extension.params[param] = shared.settings[_id]
 
     # Creating the extension ui elements
     for extension, name in iterator():
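
The modules/extensions.py hunk adds a hasattr() guard: previously, create_extensions_block() assumed every extension defines a params dict and raised AttributeError for ones that do not. A minimal sketch of the case the guard handles; the extension module here is hypothetical:

    import types

    # Hypothetical extension module that defines no params dict
    dummy_ext = types.ModuleType("dummy_ext")

    # Old behavior: dummy_ext.params raises AttributeError
    # New behavior: the guard skips it cleanly
    if hasattr(dummy_ext, 'params'):
        for param in dummy_ext.params:
            print(param)
    else:
        print("extension defines no params; nothing to update")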