Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-23 08:28:21 +01:00)
Commit 3f28925a8d
@@ -1,7 +1,7 @@
 .message {
   display: grid;
   grid-template-columns: 60px minmax(0, 1fr);
-  padding-bottom: 25px;
+  padding-bottom: 15px;
   font-size: 15px;
   font-family: 'Noto Sans', Helvetica, Arial, sans-serif;
   line-height: 22.5px !important;
@@ -52,10 +52,6 @@
   margin-bottom: 10px !important;
 }
 
-.message-body p:last-child, .chat .message-body ul:last-child, .chat .message-body ol:last-child {
-  margin-bottom: 0 !important;
-}
-
 .dark .message-body p em {
   color: rgb(138 138 138) !important;
 }
@@ -492,6 +492,10 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
   left: calc(100% - 168px);
 }
 
+#show-controls span {
+  opacity: 0.6;
+}
+
 #typing-container {
   display: none;
   position: absolute;
@@ -126,7 +126,6 @@ def load_tokenizer(model_name, model):
 
 
 def huggingface_loader(model_name):
-
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     params = {
         'low_cpu_mem_usage': True,
@@ -170,10 +169,8 @@ def huggingface_loader(model_name):
 
     # Load with quantization and/or offloading
     else:
-
         if not any((shared.args.cpu, torch.cuda.is_available(), is_xpu_available(), torch.backends.mps.is_available())):
             logger.warning('torch.cuda.is_available() and is_xpu_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.')
-
             shared.args.cpu = True
 
         if shared.args.cpu:
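For readers skimming the hunk above: only blank lines are removed inside the CPU-fallback branch of huggingface_loader; the behavior is unchanged. As a minimal sketch of the detection pattern that branch uses (a hypothetical standalone helper, not part of this commit; it assumes is_xpu_available from accelerate.utils):

import torch
from accelerate.utils import is_xpu_available


def should_fall_back_to_cpu(cpu_requested: bool) -> bool:
    # Mirrors the check in the hunk: if the user did not ask for CPU and no
    # CUDA, XPU, or MPS device is visible, force CPU mode instead of failing.
    if not any((cpu_requested, torch.cuda.is_available(), is_xpu_available(), torch.backends.mps.is_available())):
        return True
    return cpu_requested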
@@ -284,7 +284,6 @@ def create_event_handlers():
     shared.gradio['mode'].change(
         lambda x: gr.update(visible=x != 'instruct'), gradio('mode'), gradio('chat_style'), show_progress=False).then(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        partial(chat.character_is_loaded, raise_exception=True), gradio('interface_state'), None).success(
         chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
         chat.redraw_html, gradio(reload_arr), gradio('display')).then(
         lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))
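The dropped partial(chat.character_is_loaded, ...) step used .success(), which, unlike .then(), only schedules the next function when the previous one returns without raising; after this commit, chat.load_latest_history runs unconditionally after gather_interface_values. A minimal sketch of that .then()/.success() distinction in plain Gradio (hypothetical demo, not from this repository):

import gradio as gr


def may_fail(text):
    # Raising here stops a .success() chain; a .then() chain would continue.
    if not text:
        raise ValueError('empty input')
    return text


with gr.Blocks() as demo:
    box = gr.Textbox(label='input')
    out = gr.Textbox(label='output')
    btn = gr.Button('Go')
    # The uppercasing step runs only if may_fail() did not raise.
    btn.click(may_fail, box, out).success(lambda x: x.upper(), out, out)

demo.launch()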