diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 4a02abaa..bc660483 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -1,4 +1,3 @@
-import os
import time
from pathlib import Path
@@ -113,7 +112,7 @@ def output_modifier(string):
output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
- model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=os.path.abspath(output_file))
+ model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
autoplay = 'autoplay' if params['autoplay'] else ''
string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
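
Reviewer note (not part of the patch): the same `output_file` both names the wav written by `model.save_wav()` and is embedded in the `<audio>` element returned to the UI, so with `str(output_file)` both now refer to the same path relative to the working directory. A minimal sketch of the two strings, using a hypothetical character name and timestamp:

    from pathlib import Path

    # Hypothetical output path, mirroring the pattern in output_modifier()
    output_file = Path('extensions/silero_tts/outputs/Example_1700000000.wav')

    audio_path = str(output_file)           # passed to model.save_wav() after this change
    src = f'file/{output_file.as_posix()}'  # referenced by the returned <audio> element
    # Both resolve against the directory the web UI was launched from.
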
diff --git a/modules/RWKV.py b/modules/RWKV.py
index d97c1706..5cf8937a 100644
--- a/modules/RWKV.py
+++ b/modules/RWKV.py
@@ -25,10 +25,10 @@ class RWKVModel:
tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
if shared.args.rwkv_strategy is None:
- model = RWKV(model=os.path.abspath(path), strategy=f'{device} {dtype}')
+ model = RWKV(model=str(path), strategy=f'{device} {dtype}')
else:
- model = RWKV(model=os.path.abspath(path), strategy=shared.args.rwkv_strategy)
- pipeline = PIPELINE(model, os.path.abspath(tokenizer_path))
+ model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
+ pipeline = PIPELINE(model, str(tokenizer_path))
result = self()
result.pipeline = pipeline
@@ -61,7 +61,7 @@ class RWKVTokenizer:
@classmethod
def from_pretrained(self, path):
tokenizer_path = path / "20B_tokenizer.json"
- tokenizer = Tokenizer.from_file(os.path.abspath(tokenizer_path))
+ tokenizer = Tokenizer.from_file(str(tokenizer_path))
result = self()
result.tokenizer = tokenizer
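
Reviewer note (not part of the patch): as used here, `RWKV()`, `PIPELINE()` and `Tokenizer.from_file()` receive plain path strings, and `str()` preserves whatever form the caller supplied, so absolute model paths behave exactly as before; only relative paths are no longer forcibly expanded. A small sketch with a hypothetical model location:

    from pathlib import Path

    # Hypothetical absolute model path supplied by the caller
    path = Path('/opt/models/rwkv-4-pile-169m.pth')
    tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")

    print(str(path))            # '/opt/models/rwkv-4-pile-169m.pth' -- unchanged
    print(str(tokenizer_path))  # '/opt/models/20B_tokenizer.json'
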
diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py
index 5e4a38e8..fa7f15c2 100644
--- a/modules/quantized_LLaMA.py
+++ b/modules/quantized_LLaMA.py
@@ -1,4 +1,3 @@
-import os
import sys
from pathlib import Path
@@ -7,7 +6,7 @@ import torch
import modules.shared as shared
-sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
from llama import load_quant
@@ -41,7 +40,7 @@ def load_quantized_LLaMA(model_name):
print(f"Could not find {pt_model}, exiting...")
exit()
- model = load_quant(path_to_model, os.path.abspath(pt_path), bits)
+ model = load_quant(path_to_model, str(pt_path), bits)
# Multi-GPU setup
if shared.args.gpu_memory:
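
Reviewer note (not part of the patch): the behavioural difference introduced across all three files is that `str()` leaves a relative `Path` relative, whereas `os.path.abspath()` expanded it against the current working directory at call time, so the relative entries here now rely on the process being launched from the repository root. A minimal sketch of the difference:

    import os
    from pathlib import Path

    p = Path('repositories/GPTQ-for-LLaMa')
    print(str(p))              # 'repositories/GPTQ-for-LLaMa' -- stays relative
    print(os.path.abspath(p))  # '<cwd>/repositories/GPTQ-for-LLaMa' -- expanded at call time

    # If an absolute path is ever needed again, pathlib can supply it directly:
    print(p.resolve())         # absolute path, no os import required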