diff --git a/css/chat_style-cai-chat.css b/css/chat_style-cai-chat.css index 547082b5..ba0c8f13 100644 --- a/css/chat_style-cai-chat.css +++ b/css/chat_style-cai-chat.css @@ -1,7 +1,7 @@ .message { display: grid; grid-template-columns: 60px minmax(0, 1fr); - padding-bottom: 25px; + padding-bottom: 15px; font-size: 15px; font-family: 'Noto Sans', Helvetica, Arial, sans-serif; line-height: 22.5px !important; @@ -52,10 +52,6 @@ margin-bottom: 10px !important; } -.message-body p:last-child, .chat .message-body ul:last-child, .chat .message-body ol:last-child { - margin-bottom: 0 !important; -} - .dark .message-body p em { color: rgb(138 138 138) !important; } diff --git a/css/main.css b/css/main.css index bb944cc9..653da3ee 100644 --- a/css/main.css +++ b/css/main.css @@ -492,6 +492,10 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* { left: calc(100% - 168px); } +#show-controls span { + opacity: 0.6; +} + #typing-container { display: none; position: absolute; diff --git a/modules/models.py b/modules/models.py index 7f338712..efdae7cb 100644 --- a/modules/models.py +++ b/modules/models.py @@ -126,7 +126,6 @@ def load_tokenizer(model_name, model): def huggingface_loader(model_name): - path_to_model = Path(f'{shared.args.model_dir}/{model_name}') params = { 'low_cpu_mem_usage': True, @@ -170,10 +169,8 @@ def huggingface_loader(model_name): # Load with quantization and/or offloading else: - if not any((shared.args.cpu, torch.cuda.is_available(), is_xpu_available(), torch.backends.mps.is_available())): logger.warning('torch.cuda.is_available() and is_xpu_available() returned False. This means that no GPU has been detected. Falling back to CPU mode.') - shared.args.cpu = True if shared.args.cpu: diff --git a/modules/ui_chat.py b/modules/ui_chat.py index db1f5da1..76c7d5ff 100644 --- a/modules/ui_chat.py +++ b/modules/ui_chat.py @@ -284,7 +284,6 @@ def create_event_handlers(): shared.gradio['mode'].change( lambda x: gr.update(visible=x != 'instruct'), gradio('mode'), gradio('chat_style'), show_progress=False).then( ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then( - partial(chat.character_is_loaded, raise_exception=True), gradio('interface_state'), None).success( chat.load_latest_history, gradio('interface_state'), gradio('history')).then( chat.redraw_html, gradio(reload_arr), gradio('display')).then( lambda x: gr.update(choices=(histories := chat.find_all_histories(x)), value=histories[0]), gradio('interface_state'), gradio('unique_id'))