From def97f658c016d4c50fe9d682265841154eb5336 Mon Sep 17 00:00:00 2001 From: HideLord Date: Sun, 12 Mar 2023 02:54:22 +0200 Subject: [PATCH 1/3] Small patch to fix loading of character jsons. Now it correctly reads non-ascii characters on Windows. --- modules/chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/chat.py b/modules/chat.py index f40f8299..4a7fb873 100644 --- a/modules/chat.py +++ b/modules/chat.py @@ -332,7 +332,7 @@ def load_character(_character, name1, name2): shared.history['visible'] = [] if _character != 'None': shared.character = _character - data = json.loads(open(Path(f'characters/{_character}.json'), 'r').read()) + data = json.loads(open(Path(f'characters/{_character}.json'), 'r', encoding='utf-8').read()) name2 = data['char_name'] if 'char_persona' in data and data['char_persona'] != '': context += f"{data['char_name']}'s Persona: {data['char_persona']}\n" From 8403152257b3e0c405f88a0cbf08dc640e29e206 Mon Sep 17 00:00:00 2001 From: HideLord Date: Sun, 12 Mar 2023 17:28:15 +0200 Subject: [PATCH 2/3] Fixing compatibility with GPTQ repo commit 2f667f7da051967566a5fb0546f8614bcd3a1ccd. 
Expects string and breaks on a Path object. --- modules/quantized_LLaMA.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py index ca4eebf2..9ab7f333 100644 --- a/modules/quantized_LLaMA.py +++ b/modules/quantized_LLaMA.py @@ -41,7 +41,7 @@ def load_quantized_LLaMA(model_name): print(f"Could not find {pt_model}, exiting...") exit() - model = load_quant(path_to_model, pt_path, bits) + model = load_quant(path_to_model, str(pt_path), bits) # Multi-GPU setup if shared.args.gpu_memory: From fda376d9c386aebffe6966ed72cf7202c491bd3f Mon Sep 17 00:00:00 2001 From: oobabooga <112222186+oobabooga@users.noreply.github.com> Date: Sun, 12 Mar 2023 12:41:04 -0300 Subject: [PATCH 3/3] Use os.path.abspath() instead of str() --- modules/quantized_LLaMA.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py index 9ab7f333..5e4a38e8 100644 --- a/modules/quantized_LLaMA.py +++ b/modules/quantized_LLaMA.py @@ -41,7 +41,7 @@ def load_quantized_LLaMA(model_name): print(f"Could not find {pt_model}, exiting...") exit() - model = load_quant(path_to_model, str(pt_path), bits) + model = load_quant(path_to_model, os.path.abspath(pt_path), bits) # Multi-GPU setup if shared.args.gpu_memory: