Merge pull request #5152 from oobabooga/dev

Merge dev branch
commit 3f28925a8d
oobabooga, 2024-01-02 13:22:14 -03:00 (committed by GitHub)
4 changed files with 5 additions and 9 deletions


@@ -1,7 +1,7 @@
 .message {
   display: grid;
   grid-template-columns: 60px minmax(0, 1fr);
-  padding-bottom: 25px;
+  padding-bottom: 15px;
   font-size: 15px;
   font-family: 'Noto Sans', Helvetica, Arial, sans-serif;
   line-height: 22.5px !important;
@@ -52,10 +52,6 @@
   margin-bottom: 10px !important;
 }
 
-.message-body p:last-child, .chat .message-body ul:last-child, .chat .message-body ol:last-child {
-  margin-bottom: 0 !important;
-}
-
 .dark .message-body p em {
   color: rgb(138 138 138) !important;
 }


@@ -492,6 +492,10 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
   left: calc(100% - 168px);
 }
 
+#show-controls span {
+  opacity: 0.6;
+}
+
 #typing-container {
   display: none;
   position: absolute;


@@ -126,7 +126,6 @@ def load_tokenizer(model_name, model):
 def huggingface_loader(model_name):
     path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
     params = {
         'low_cpu_mem_usage': True,
@@ -170,10 +169,8 @@ def huggingface_loader(model_name):
     # Load with quantization and/or offloading
     else:
         if not any((shared.args.cpu, torch.cuda.is_available(), is_xpu_available(), torch.backends.mps.is_available())):
             logger.warning('torch.cuda.is_available() and is_xpu_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.')
             shared.args.cpu = True
 
         if shared.args.cpu:
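
The surviving context in the second hunk shows the GPU-detection fallback: when the user has not requested CPU mode and no accelerator backend reports availability, the loader forces CPU mode. A minimal standalone sketch of that guard (resolve_cpu_mode and args_cpu are illustrative names, not part of the project; the real code also consults is_xpu_available() from accelerate, omitted here to keep the example dependency-free):

import torch

def resolve_cpu_mode(args_cpu: bool) -> bool:
    # Mirrors the any(...) guard above: fall back to CPU only when the user
    # did not ask for CPU and no accelerator backend is available.
    if not any((args_cpu, torch.cuda.is_available(), torch.backends.mps.is_available())):
        print('No GPU detected. Falling back to CPU mode.')
        return True
    return args_cpu

print(resolve_cpu_mode(False))  # True on a CPU-only machine, False otherwise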


@@ -284,7 +284,6 @@ def create_event_handlers():
     shared.gradio['mode'].change(
         lambda x: gr.update(visible=x != 'instruct'), gradio('mode'), gradio('chat_style'), show_progress=False).then(
         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-        partial(chat.character_is_loaded, raise_exception=True), gradio('interface_state'), None).success(
         chat.load_latest_history, gradio('interface_state'), gradio('history')).then(
         chat.redraw_html, gradio(reload_arr), gradio('display')).then(
         lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))
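
The deleted line drops the chat.character_is_loaded check from this event chain. For readers unfamiliar with Gradio's chained listeners, here is a toy sketch of the .then()/.success() pattern used above, assuming Gradio 4.x (the component and function names are made up for illustration, not the project's handlers):

import gradio as gr

def validate(text):
    # Raising gr.Error aborts the chain, so steps attached with .success()
    # are skipped -- the behavior the removed character_is_loaded step relied on.
    if not text:
        raise gr.Error('empty input')
    return text

with gr.Blocks() as demo:
    box = gr.Textbox(label='input')
    out = gr.Textbox(label='output')
    # .then() runs unconditionally after the previous step;
    # .success() runs only if the previous step did not raise.
    box.submit(validate, box, out).success(lambda x: x.upper(), out, out)

demo.launch()

In the chain above, removing the .success(...) link means chat.load_latest_history now runs directly after ui.gather_interface_values, without the character check in between.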