Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-12-23 21:18:00 +01:00

Use str(Path) instead of os.path.abspath(Path)

This commit is contained in:
parent b9e0712b92
commit 77294b27dd
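
Note on the pattern: the diff below replaces each os.path.abspath(...) argument with str(...) and drops import os where it becomes unused. A minimal sketch of the behavioral difference, using a hypothetical path:

import os
from pathlib import Path

p = Path("extensions/silero_tts/outputs/example.wav")  # hypothetical path

# os.path.abspath() resolves against the current working directory at
# call time:
print(os.path.abspath(p))  # e.g. /home/user/text-generation-webui/extensions/silero_tts/outputs/example.wav

# str() renders the Path exactly as written, touching neither the
# filesystem nor the CWD:
print(str(p))              # extensions/silero_tts/outputs/example.wav

Both forms reach the same file as long as the process keeps running from the directory it was launched in, which the relative extensions/... paths elsewhere in these files already assume.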
@@ -1,4 +1,3 @@
-import os
 import time
 from pathlib import Path
 
@@ -113,7 +112,7 @@ def output_modifier(string):
     output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
     prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
     silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
-    model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=os.path.abspath(output_file))
+    model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
 
     autoplay = 'autoplay' if params['autoplay'] else ''
     string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
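
Worth noting in this hunk: the <audio> tag two lines below already builds its src from the same relative Path via as_posix(), so the wav is addressed relatively at both the write and the embed. A sketch of that round-trip, with a made-up character name standing in for shared.character:

from pathlib import Path
import time

character = "Example"  # hypothetical stand-in for shared.character
output_file = Path(f"extensions/silero_tts/outputs/{character}_{int(time.time())}.wav")

# The same Path is rendered twice: once for the writer, once for the page.
audio_path = str(output_file)  # what model.save_wav() receives
audio_tag = f'<audio src="file/{output_file.as_posix()}" controls></audio>'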
@@ -25,10 +25,10 @@ class RWKVModel:
         tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
 
         if shared.args.rwkv_strategy is None:
-            model = RWKV(model=os.path.abspath(path), strategy=f'{device} {dtype}')
+            model = RWKV(model=str(path), strategy=f'{device} {dtype}')
         else:
-            model = RWKV(model=os.path.abspath(path), strategy=shared.args.rwkv_strategy)
-        pipeline = PIPELINE(model, os.path.abspath(tokenizer_path))
+            model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
+        pipeline = PIPELINE(model, str(tokenizer_path))
 
         result = self()
         result.pipeline = pipeline
@@ -61,7 +61,7 @@ class RWKVTokenizer:
     @classmethod
     def from_pretrained(self, path):
         tokenizer_path = path / "20B_tokenizer.json"
-        tokenizer = Tokenizer.from_file(os.path.abspath(tokenizer_path))
+        tokenizer = Tokenizer.from_file(str(tokenizer_path))
 
         result = self()
         result.tokenizer = tokenizer
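
The two RWKV hunks spell the tokenizer path differently, an f-string in one and the / operator in the other, but both build the same Path, and str() is what satisfies loaders that only accept plain strings (the Rust-backed tokenizers.Tokenizer.from_file has historically been strict about this; an assumption worth checking against the installed version). A sketch with a hypothetical model file:

from pathlib import Path

path = Path("models/RWKV-4-Pile-169M.pth")  # hypothetical model file

# Both constructions from the diff yield the same sibling path:
a = Path(f"{path.parent}/20B_tokenizer.json")
b = path.parent / "20B_tokenizer.json"
assert a == b

tokenizer_file = str(b)  # 'models/20B_tokenizer.json', still relative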
@@ -1,4 +1,3 @@
-import os
 import sys
 from pathlib import Path
 
@@ -7,7 +6,7 @@ import torch
 
 import modules.shared as shared
 
-sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
 from llama import load_quant
 
 
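
One caveat this hunk preserves rather than introduces: str(Path("repositories/GPTQ-for-LLaMa")) is a relative sys.path entry, so the from llama import below it only resolves while the process runs from the repo root; the abspath version had the same dependency, just frozen at import time. Anchoring on the module's own file would remove it entirely. A sketch of that alternative, not what the commit does, with the parent levels guessed from the repo layout:

import sys
from pathlib import Path

# Resolve the GPTQ-for-LLaMa checkout relative to this module's file
# rather than the current working directory (assumes this module sits
# one directory below the repo root):
repo_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(repo_root / "repositories/GPTQ-for-LLaMa"))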
@@ -41,7 +40,7 @@ def load_quantized_LLaMA(model_name):
         print(f"Could not find {pt_model}, exiting...")
         exit()
 
-    model = load_quant(path_to_model, os.path.abspath(pt_path), bits)
+    model = load_quant(path_to_model, str(pt_path), bits)
 
     # Multi-GPU setup
     if shared.args.gpu_memory:
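
More generally, since PEP 519 (Python 3.6) any API that routes its path argument through os.fspath() accepts Path objects directly; str() is only needed for callees that do their own string handling, and it yields the same text os.fspath() would:

import os
from pathlib import Path

p = Path("models/llama-7b-4bit.pt")  # hypothetical checkpoint path
assert os.fspath(p) == str(p)  # same relative string, no CWD involved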