From ad6b699503eeabcad141efb6172ff43dc1976522 Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Wed, 8 Mar 2023 22:02:17 +1100
Subject: [PATCH 01/33] Better TTS with autoplay
- Adds "still_streaming" to shared module for extensions to know if generation is complete
- Changed TTS extension with new options:
- Show text under the audio widget
- Automatically play the audio once text generation finishes
- manage the generated wav files (only keep files for finished generations, optional max file limit)
- [wip] ability to change voice pitch and speed
- added 'tensorboard' to requirements, since python sent "tensorboard not found" errors after a fresh installation.
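
A minimal sketch of how an extension can consume the new flag (illustrative only; `synthesize` is a hypothetical helper, not part of this patch):

    import modules.shared as shared

    def output_modifier(string):
        # While tokens are still streaming in, pass the text through untouched;
        # only do the expensive work once the reply is complete.
        if shared.still_streaming:
            return string
        return synthesize(string)  # hypothetical post-processing step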
---
extensions/silero_tts/requirements.txt | 1 +
extensions/silero_tts/script.py | 60 +++++++++++++++++++++++---
modules/shared.py | 1 +
modules/text_generation.py | 11 ++++-
requirements.txt | 1 +
5 files changed, 67 insertions(+), 7 deletions(-)
diff --git a/extensions/silero_tts/requirements.txt b/extensions/silero_tts/requirements.txt
index f2f0bff5..b4444306 100644
--- a/extensions/silero_tts/requirements.txt
+++ b/extensions/silero_tts/requirements.txt
@@ -4,3 +4,4 @@ pydub
PyYAML
torch
torchaudio
+simpleaudio
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index f697d0e2..03319dbf 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -4,20 +4,36 @@ from pathlib import Path
import gradio as gr
import torch
+import modules.shared as shared
+import simpleaudio as sa
+
torch._C._jit_set_profiling_mode(False)
params = {
'activate': True,
- 'speaker': 'en_56',
+ 'speaker': 'en_5',
'language': 'en',
'model_id': 'v3_en',
'sample_rate': 48000,
'device': 'cpu',
+ 'max_wavs': 20,
+ 'play_audio': True,
+ 'show_text': True,
}
current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
wav_idx = 0
+table = str.maketrans({
+ "<": "<",
+ ">": ">",
+ "&": "&",
+ "'": "'",
+ '"': """,
+})
+def xmlesc(txt):
+ return txt.translate(table)
+
def load_model():
model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
model.to(params['device'])
@@ -58,20 +74,45 @@ def output_modifier(string):
if params['activate'] == False:
return string
+ orig_string = string
string = remove_surrounded_chars(string)
string = string.replace('"', '')
string = string.replace('“', '')
string = string.replace('\n', ' ')
string = string.strip()
+ auto_playable=True
if string == '':
- string = 'empty reply, try regenerating'
+ string = 'empty reply, try regenerating'
+ auto_playable=False
+
+ #x-slow, slow, medium, fast, x-fast
+ #x-low, low, medium, high, x-high
+    #prosody='<prosody rate="x-slow" pitch="x-low">'
+ prosody=''
+    string ='<speak>'+prosody+xmlesc(string)+'</speak>'
+
output_file = Path(f'extensions/silero_tts/outputs/{wav_idx:06d}.wav')
- audio = model.save_wav(text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
-
+ audio = model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
    string = f'<audio src="file/{output_file.as_posix()}" controls></audio>'
- wav_idx += 1
+
+ #reset if too many wavs. set max to -1 for unlimited.
+ if wav_idx < params['max_wavs'] and params['max_wavs'] > 0:
+ #only increment if starting a new stream, else replace during streaming. Does not update duration on webui sometimes?
+ if not shared.still_streaming:
+ wav_idx += 1
+ else:
+ wav_idx = 0
+
+ if params['show_text']:
+ string+='\n\n'+orig_string
+
+ #if params['play_audio'] == True and auto_playable and shared.stop_everything:
+ if params['play_audio'] == True and auto_playable and not shared.still_streaming:
+ stop_autoplay()
+ wave_obj = sa.WaveObject.from_wave_file(output_file.as_posix())
+ wave_obj.play()
return string
@@ -84,11 +125,20 @@ def bot_prefix_modifier(string):
return string
+def stop_autoplay():
+ sa.stop_all()
+
def ui():
# Gradio elements
activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
+ show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
+ play_audio = gr.Checkbox(value=params['play_audio'], label='Play TTS automatically')
+ stop_audio = gr.Button("Stop Auto-Play")
voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
# Event functions to update the parameters in the backend
activate.change(lambda x: params.update({"activate": x}), activate, None)
+ play_audio.change(lambda x: params.update({"play_audio": x}), play_audio, None)
+ show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
+ stop_audio.click(stop_autoplay)
voice.change(lambda x: params.update({"speaker": x}), voice, None)
diff --git a/modules/shared.py b/modules/shared.py
index e9dfdaa2..90adb320 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -12,6 +12,7 @@ is_LLaMA = False
history = {'internal': [], 'visible': []}
character = 'None'
stop_everything = False
+still_streaming = False
# UI elements (buttons, sliders, HTML, etc)
gradio = {}
diff --git a/modules/text_generation.py b/modules/text_generation.py
index f9082a31..c9f4fc6a 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -182,6 +182,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
# Generate the reply 8 tokens at a time
else:
yield formatted_outputs(original_question, shared.model_name)
+ shared.still_streaming = True
for i in tqdm(range(max_new_tokens//8+1)):
with torch.no_grad():
output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
@@ -191,8 +192,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
reply = decode(output)
if not (shared.args.chat or shared.args.cai_chat):
reply = original_question + apply_extensions(reply[len(question):], "output")
- yield formatted_outputs(reply, shared.model_name)
-
+
if not shared.args.flexgen:
if output[-1] == n:
break
@@ -201,6 +201,13 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
break
input_ids = np.reshape(output, (1, output.shape[0]))
+
+            #Mid-stream yield, runs if no break occurred
+ yield formatted_outputs(reply, shared.model_name)
if shared.soft_prompt:
inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
+
+        #Stream finished due to max tokens or a break. Do the final yield.
+ shared.still_streaming = False
+ yield formatted_outputs(reply, shared.model_name)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 55aeb8fd..48ca1e4e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,4 @@ numpy
rwkv==0.0.6
safetensors==0.2.8
git+https://github.com/huggingface/transformers
+tensorboard
From 738be6dd59a6f9c2ee215093675f2d55111d89ca Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Wed, 8 Mar 2023 22:25:55 +1100
Subject: [PATCH 02/33] Fix merge errors and unlimited wav bug
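The bug in short: with -1 meaning "unlimited", the old condition evaluated to False and reset the index on every message, so file 000000.wav was overwritten each time. A condensed view of the predicate change:

    max_wavs = -1   # -1 means keep unlimited wav files
    wav_idx = 5
    # before: False whenever max_wavs == -1, so wav_idx was reset to 0
    in_range = wav_idx < max_wavs and max_wavs > 0
    # after: unlimited mode never triggers the reset
    in_range = wav_idx < max_wavs or max_wavs < 0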
---
extensions/silero_tts/script.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 53bd554c..eaf56159 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -93,11 +93,11 @@ def output_modifier(string):
    string ='<speak>'+prosody+xmlesc(string)+'</speak>'
output_file = Path(f'extensions/silero_tts/outputs/{wav_idx:06d}.wav')
- model.save_wav(text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
+ model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
string = f''
#reset if too many wavs. set max to -1 for unlimited.
- if wav_idx < params['max_wavs'] and params['max_wavs'] > 0:
+ if wav_idx < params['max_wavs'] or params['max_wavs'] < 0:
#only increment if starting a new stream, else replace during streaming. Does not update duration on webui sometimes?
if not shared.still_streaming:
wav_idx += 1
From a2b5383398adc6da5c46811179bfadaefa5e23f7 Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Thu, 9 Mar 2023 10:48:44 +1100
Subject: [PATCH 03/33] Merge in audio generation only on text stream finish,
 postpone audio block autoplay
- Keeping simpleaudio until the audio block's "autoplay" no longer plays previous messages
- Only generate audio for finished messages (sketched below)
- Better name for autoplay, clean up comments
- Set the default to unlimited wav files. Still a few bugs when the wav id resets
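
The streaming guard boils down to this pattern (a condensed sketch of the diff below; the empty placeholder keeps the message height stable while tokens arrive):

    if not shared.still_streaming:
        model.save_wav(ssml_text=string, speaker=params['speaker'],
                       sample_rate=int(params['sample_rate']),
                       audio_path=str(output_file))
        string = f'<audio src="file/{output_file.as_posix()}" controls></audio>\n\n'
    else:
        string = f'<audio controls></audio>\n\n'  # placeholder, no source yet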
Co-Authored-By: Christoph Hess <9931495+ChristophHess@users.noreply.github.com>
---
extensions/silero_tts/script.py | 34 +++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index eaf56159..334b02b9 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -15,14 +15,15 @@ params = {
'model_id': 'v3_en',
'sample_rate': 48000,
'device': 'cpu',
- 'max_wavs': 20,
- 'play_audio': True,
+ 'max_wavs': -1,
+ 'autoplay': True,
'show_text': True,
}
current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
wav_idx = 0
+#Used for making text xml compatible, needed for voice pitch and speed control
table = str.maketrans({
"<": "<",
">": ">",
@@ -88,27 +89,32 @@ def output_modifier(string):
#x-slow, slow, medium, fast, x-fast
#x-low, low, medium, high, x-high
-    #prosody='<prosody rate="x-slow" pitch="x-low">'
-    prosody=''
+    prosody='<prosody rate="medium" pitch="medium">'
    string ='<speak>'+prosody+xmlesc(string)+'</speak>'
output_file = Path(f'extensions/silero_tts/outputs/{wav_idx:06d}.wav')
- model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
- string = f''
+ autoplay_str = ''
+ if not shared.still_streaming:
+ model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
+        #disabled until autoplay doesn't run on previous messages
+ #autoplay = 'autoplay' if (params['autoplay'] and auto_playable) else ''
+        string = f'<audio src="file/{output_file.as_posix()}" controls{autoplay_str}></audio>\n\n'
+ else:
+ #placeholder so text doesnt shift around so much
+        string =f'<audio controls></audio>\n\n'
#reset if too many wavs. set max to -1 for unlimited.
if wav_idx < params['max_wavs'] or params['max_wavs'] < 0:
- #only increment if starting a new stream, else replace during streaming. Does not update duration on webui sometimes?
+ #only increment if starting a new stream, else replace during streaming.
if not shared.still_streaming:
wav_idx += 1
else:
wav_idx = 0
-
+
if params['show_text']:
- string+='\n\n'+orig_string
-
- #if params['play_audio'] == True and auto_playable and shared.stop_everything:
- if params['play_audio'] == True and auto_playable and not shared.still_streaming:
+ string+=orig_string
+
+ if params['autoplay'] == True and auto_playable and not shared.still_streaming:
stop_autoplay()
wave_obj = sa.WaveObject.from_wave_file(output_file.as_posix())
wave_obj.play()
@@ -131,13 +137,13 @@ def ui():
# Gradio elements
activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
- play_audio = gr.Checkbox(value=params['play_audio'], label='Play TTS automatically')
+ autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
stop_audio = gr.Button("Stop Auto-Play")
voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
# Event functions to update the parameters in the backend
activate.change(lambda x: params.update({"activate": x}), activate, None)
- play_audio.change(lambda x: params.update({"play_audio": x}), play_audio, None)
+ autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
stop_audio.click(stop_autoplay)
voice.change(lambda x: params.update({"speaker": x}), voice, None)
From 0dfac4b777009d415d848c2f0bc718ec1bbac7e5 Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Sat, 11 Mar 2023 16:34:59 +1100
Subject: [PATCH 04/33] Working html autoplay, clean up, improve wav naming
- New autoplay using the html audio tag; autoplay is removed from the old message when new input is provided (see the sketch below)
- Add voice pitch and speed control
- Group settings together
- Use character name + conversation history length to match wavs to messages, minimizing problems when changing characters
Current minor bugs:
- Gradio seems to cache the audio files, so using "clear history" and generating new messages will play the old audio (the new messages are saved correctly). Gradio will clear the cache and use the correct audio after a few messages or after a page refresh.
- Switching characters does not immediately update the message ID used for the audio. The ID is updated after the first new message, but that message will use the wrong ID.
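
The autoplay removal is a plain string rewrite on the last history entry, roughly (pattern as in the diff below):

    # Strip autoplay from the previous reply so a page re-render
    # doesn't replay old audio; only the newest message autoplays.
    if len(shared.history['internal']) > 0:
        text, reply = shared.history['internal'][-1]
        shared.history['internal'][-1] = [text, reply.replace('controls autoplay>', 'controls>')]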
---
extensions/silero_tts/requirements.txt | 1 -
extensions/silero_tts/script.py | 79 +++++++++++++-------------
2 files changed, 38 insertions(+), 42 deletions(-)
diff --git a/extensions/silero_tts/requirements.txt b/extensions/silero_tts/requirements.txt
index b4444306..f2f0bff5 100644
--- a/extensions/silero_tts/requirements.txt
+++ b/extensions/silero_tts/requirements.txt
@@ -4,4 +4,3 @@ pydub
PyYAML
torch
torchaudio
-simpleaudio
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 334b02b9..b66963e2 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -4,7 +4,6 @@ import gradio as gr
import torch
import modules.shared as shared
-import simpleaudio as sa
torch._C._jit_set_profiling_mode(False)
@@ -15,13 +14,16 @@ params = {
'model_id': 'v3_en',
'sample_rate': 48000,
'device': 'cpu',
- 'max_wavs': -1,
- 'autoplay': True,
'show_text': True,
+ 'autoplay': True,
+ 'voice_pitch': 'medium',
+ 'voice_speed': 'medium',
}
current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
-wav_idx = 0
+voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
+voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
+last_msg_id = 0
#Used for making text xml compatible, needed for voice pitch and speed control
table = str.maketrans({
@@ -55,6 +57,14 @@ def input_modifier(string):
This function is applied to your text inputs before
they are fed into the model.
"""
+ #remove autoplay from previous
+ if len(shared.history['internal'])>0:
+ [text, reply] = shared.history['internal'][-1]
+ [visible_text, visible_reply] = shared.history['visible'][-1]
+ rep_clean = reply.replace('controls autoplay>','controls>')
+ vis_rep_clean = visible_reply.replace('controls autoplay>','controls>')
+ shared.history['internal'][-1] = [text, rep_clean]
+ shared.history['visible'][-1] = [visible_text, vis_rep_clean]
return string
@@ -63,7 +73,7 @@ def output_modifier(string):
This function is applied to the model outputs.
"""
- global wav_idx, model, current_params
+ global model, current_params
for i in params:
if params[i] != current_params[i]:
@@ -81,44 +91,31 @@ def output_modifier(string):
string = string.replace('\n', ' ')
string = string.strip()
- auto_playable=True
+ silent_string = False #Used to prevent unnecessary audio file generation
if string == '':
string = 'empty reply, try regenerating'
- auto_playable=False
-
+ silent_string = True
#x-slow, slow, medium, fast, x-fast
#x-low, low, medium, high, x-high
-    prosody='<prosody rate="medium" pitch="medium">'
+ pitch = params['voice_pitch']
+ speed = params['voice_speed']
+    prosody=f'<prosody rate="{speed}" pitch="{pitch}">'
    string ='<speak>'+prosody+xmlesc(string)+'</speak>'
-
- output_file = Path(f'extensions/silero_tts/outputs/{wav_idx:06d}.wav')
- autoplay_str = ''
- if not shared.still_streaming:
+
+ current_msg_id=len(shared.history['visible'])#check length here, since output_modifier can run many times on the same message
+ output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{current_msg_id:06d}.wav')
+ if not shared.still_streaming and not silent_string:
model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
-        #disabled until autoplay doesn't run on previous messages
- #autoplay = 'autoplay' if (params['autoplay'] and auto_playable) else ''
-        string = f'<audio src="file/{output_file.as_posix()}" controls{autoplay_str}></audio>\n\n'
+        string = f'<audio src="file/{output_file.as_posix()}" controls autoplay></audio>\n\n'
else:
- #placeholder so text doesnt shift around so much
-        string =f'<audio controls></audio>\n\n'
-
- #reset if too many wavs. set max to -1 for unlimited.
- if wav_idx < params['max_wavs'] or params['max_wavs'] < 0:
- #only increment if starting a new stream, else replace during streaming.
- if not shared.still_streaming:
- wav_idx += 1
- else:
- wav_idx = 0
+ #placeholder so text doesn't shift around so much
+    string ='<audio controls></audio>\n\n'
if params['show_text']:
+ #string+=f'*[{current_msg_id}]:*'+orig_string #Debug, looks like there is a delay in "current_msg_id" being updated when switching characters (updates after new message sent). Can't find the source. "shared.character" is updating properly.
string+=orig_string
- if params['autoplay'] == True and auto_playable and not shared.still_streaming:
- stop_autoplay()
- wave_obj = sa.WaveObject.from_wave_file(output_file.as_posix())
- wave_obj.play()
-
return string
def bot_prefix_modifier(string):
@@ -130,20 +127,20 @@ def bot_prefix_modifier(string):
return string
-def stop_autoplay():
- sa.stop_all()
-
def ui():
# Gradio elements
- activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
- show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
- autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
- stop_audio = gr.Button("Stop Auto-Play")
- voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
+ with gr.Accordion("Silero TTS"):
+ activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
+ show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
+ autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
+ voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
+ v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')
+ v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')
# Event functions to update the parameters in the backend
activate.change(lambda x: params.update({"activate": x}), activate, None)
- autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
- stop_audio.click(stop_autoplay)
+ autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
voice.change(lambda x: params.update({"speaker": x}), voice, None)
+ v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None)
+ v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None)
From b8f7d34c1df5b12e60491e4c8a6494d5e6aec20e Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Sat, 11 Mar 2023 17:05:09 +1100
Subject: [PATCH 05/33] Undo changes to requirements
Needing to manually install tensorboard might be a Windows-only problem; it can easily be solved manually.
---
requirements.txt | 1 -
1 file changed, 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index a8a6eada..47c56a45 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,6 +5,5 @@ gradio==3.18.0
numpy
rwkv==0.1.0
safetensors==0.2.8
-tensorboard
sentencepiece
git+https://github.com/oobabooga/transformers@llama_push
From 8f8da6707d7e71c2eef01c2d33ca6623cebf080c Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 11 Mar 2023 11:17:13 -0300
Subject: [PATCH 06/33] Minor style changes to silero_tts
---
extensions/silero_tts/script.py | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index b66963e2..7e63d8b7 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -14,18 +14,19 @@ params = {
'model_id': 'v3_en',
'sample_rate': 48000,
'device': 'cpu',
- 'show_text': True,
+ 'show_text': False,
'autoplay': True,
'voice_pitch': 'medium',
'voice_speed': 'medium',
}
+
current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
last_msg_id = 0
-#Used for making text xml compatible, needed for voice pitch and speed control
+# Used for making text xml compatible, needed for voice pitch and speed control
table = str.maketrans({
"<": "<",
">": ">",
@@ -33,6 +34,7 @@ table = str.maketrans({
"'": "'",
'"': """,
})
+
def xmlesc(txt):
return txt.translate(table)
@@ -57,7 +59,8 @@ def input_modifier(string):
This function is applied to your text inputs before
they are fed into the model.
"""
- #remove autoplay from previous
+
+ # Remove autoplay from previous
if len(shared.history['internal'])>0:
[text, reply] = shared.history['internal'][-1]
[visible_text, visible_reply] = shared.history['visible'][-1]
@@ -91,30 +94,30 @@ def output_modifier(string):
string = string.replace('\n', ' ')
string = string.strip()
- silent_string = False #Used to prevent unnecessary audio file generation
+ silent_string = False # Used to prevent unnecessary audio file generation
if string == '':
- string = 'empty reply, try regenerating'
- silent_string = True
+ string = 'empty reply, try regenerating'
+ silent_string = True
- #x-slow, slow, medium, fast, x-fast
- #x-low, low, medium, high, x-high
+ # x-slow, slow, medium, fast, x-fast
+ # x-low, low, medium, high, x-high
pitch = params['voice_pitch']
speed = params['voice_speed']
    prosody=f'<prosody rate="{speed}" pitch="{pitch}">'
-    string ='<speak>'+prosody+xmlesc(string)+'</speak>'
+    string = '<speak>'+prosody+xmlesc(string)+'</speak>'
- current_msg_id=len(shared.history['visible'])#check length here, since output_modifier can run many times on the same message
+ current_msg_id = len(shared.history['visible']) # Check length here, since output_modifier can run many times on the same message
output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{current_msg_id:06d}.wav')
if not shared.still_streaming and not silent_string:
model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
        string = f'<audio src="file/{output_file.as_posix()}" controls autoplay></audio>\n\n'
else:
- #placeholder so text doesn't shift around so much
-        string ='<audio controls></audio>\n\n'
+        # Placeholder so text doesn't shift around so much
+        string = '<audio controls></audio>\n\n'
if params['show_text']:
- #string+=f'*[{current_msg_id}]:*'+orig_string #Debug, looks like there is a delay in "current_msg_id" being updated when switching characters (updates after new message sent). Can't find the source. "shared.character" is updating properly.
- string+=orig_string
+ #string += f'*[{current_msg_id}]:*'+orig_string #Debug, looks like there is a delay in "current_msg_id" being updated when switching characters (updates after new message sent). Can't find the source. "shared.character" is updating properly.
+ string += orig_string
return string
From d4afed4e44a748c22d9fa97edb3f818ae8af191f Mon Sep 17 00:00:00 2001
From: Xan <70198941+xanthousm@users.noreply.github.com>
Date: Sun, 12 Mar 2023 17:56:57 +1100
Subject: [PATCH 07/33] Fixes and polish
- Change wav naming to be completely unique using a timestamp instead of a message ID; this stops the browser from using cached audio when new audio is made under the same file name (e.g. after regenerate or clear history). See the sketch below.
- Make the autoplay setting actually disable autoplay.
- Make the settings panel a bit more compact.
- Hide html errors when the audio file of a chat history message is missing.
- Add a button to permanently convert TTS history to normal text messages.
- Changed the "show message text" toggle to affect the chat history.
---
extensions/silero_tts/script.py | 89 ++++++++++++++++++++++++++-------
1 file changed, 72 insertions(+), 17 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 7e63d8b7..1a60c901 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -2,8 +2,10 @@ from pathlib import Path
import gradio as gr
import torch
-
+import time
+import re
import modules.shared as shared
+import modules.chat as chat
torch._C._jit_set_profiling_mode(False)
@@ -54,19 +56,57 @@ def remove_surrounded_chars(string):
new_string += char
return new_string
+def remove_tts_from_history():
+ suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
+ for i, entry in enumerate(shared.history['internal']):
+ reply = entry[1]
+        reply = re.sub("(<USER>|<user>|{{user}})", shared.settings[f'name1{suffix}'], reply)
+        if shared.args.chat:
+            reply = reply.replace('\n', '<br>')
+ shared.history['visible'][i][1] = reply
+
+ if shared.args.cai_chat:
+        return chat.generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character)
+ else:
+ return shared.history['visible']
+
+def toggle_text_in_history():
+ suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
+    audio_str='\n\n' # The '\n\n' used after </audio>
+    if shared.args.chat:
+        audio_str='<br><br>'
+
+ if params['show_text']==True:
+ #for i, entry in enumerate(shared.history['internal']):
+ for i, entry in enumerate(shared.history['visible']):
+ vis_reply = entry[1]
+            if vis_reply.startswith('<audio'):
+ return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
def input_modifier(string):
"""
@@ -104,11 +78,9 @@ def input_modifier(string):
they are fed into the model.
"""
- # Remove autoplay from previous chat history
- if (shared.args.chat or shared.args.cai_chat)and len(shared.history['internal'])>0:
- [visible_text, visible_reply] = shared.history['visible'][-1]
- vis_rep_clean = visible_reply.replace('controls autoplay>','controls>')
- shared.history['visible'][-1] = [visible_text, vis_rep_clean]
+ # Remove autoplay from the last reply
+ if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
+ shared.history['visible'][-1][1] = shared.history['visible'][-1][1].replace('controls autoplay>','controls>')
return string
@@ -128,34 +100,25 @@ def output_modifier(string):
if params['activate'] == False:
return string
- orig_string = string
+ original_string = string
string = remove_surrounded_chars(string)
string = string.replace('"', '')
string = string.replace('“', '')
string = string.replace('\n', ' ')
string = string.strip()
- silent_string = False # Used to prevent unnecessary audio file generation
if string == '':
- string = 'empty reply, try regenerating'
- silent_string = True
-
- pitch = params['voice_pitch']
- speed = params['voice_speed']
-    prosody=f'<prosody rate="{speed}" pitch="{pitch}">'
-    string = '<speak>'+prosody+xmlesc(string)+'</speak>'
-
- if not shared.still_streaming and not silent_string:
- output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
- model.save_wav(ssml_text=string, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
- autoplay_str = ' autoplay' if params['autoplay'] else ''
-        string = f'<audio src="file/{output_file.as_posix()}" controls{autoplay_str}></audio>\n\n'
+ string = '*Empty reply, try regenerating*'
else:
- # Placeholder so text doesn't shift around so much
-        string = '<audio controls></audio>\n\n'
+ output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
+        prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
+        silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
+ model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=os.path.abspath(output_file))
- if params['show_text']:
- string += orig_string
+ autoplay = 'autoplay' if params['autoplay'] else ''
+        string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
+ if params['show_text']:
+ string += f'\n\n{original_string}'
return string
@@ -180,21 +143,21 @@ def ui():
v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')
v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')
with gr.Row():
- convert = gr.Button('Permanently replace chat history audio with message text')
- convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
+ convert = gr.Button('Permanently replace audios with the message texts')
convert_cancel = gr.Button('Cancel', visible=False)
+ convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
# Convert history with confirmation
convert_arr = [convert_confirm, convert, convert_cancel]
convert.click(lambda :[gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
convert_confirm.click(lambda :[gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
- convert_confirm.click(remove_tts_from_history, [], shared.gradio['display'])
+ convert_confirm.click(remove_tts_from_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
convert_confirm.click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
convert_cancel.click(lambda :[gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
# Toggle message text in history
show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
- show_text.change(toggle_text_in_history, [], shared.gradio['display'])
+ show_text.change(toggle_text_in_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
show_text.change(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
# Event functions to update the parameters in the backend
diff --git a/modules/shared.py b/modules/shared.py
index a06c9774..5f6c01f3 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -11,7 +11,6 @@ is_RWKV = False
history = {'internal': [], 'visible': []}
character = 'None'
stop_everything = False
-still_streaming = False
# UI elements (buttons, sliders, HTML, etc)
gradio = {}
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 7cf68c06..6ee9d931 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -189,7 +189,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
def generate_with_streaming(**kwargs):
return Iteratorize(generate_with_callback, kwargs, callback=None)
- shared.still_streaming = True
yield formatted_outputs(original_question, shared.model_name)
with eval(f"generate_with_streaming({', '.join(generate_params)})") as generator:
for output in generator:
@@ -204,12 +203,10 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
break
yield formatted_outputs(reply, shared.model_name)
- shared.still_streaming = False
yield formatted_outputs(reply, shared.model_name)
# Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
else:
- shared.still_streaming = True
for i in range(max_new_tokens//8+1):
clear_torch_cache()
with torch.no_grad():
@@ -229,7 +226,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
if shared.soft_prompt:
inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
- shared.still_streaming = False
yield formatted_outputs(reply, shared.model_name)
finally:
From b9e0712b92ab81eee50740253798d90ed835a43a Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 12 Mar 2023 23:58:25 -0300
Subject: [PATCH 21/33] Fix Open Assistant
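Open Assistant checkpoints use special tokens as structural markers, so they must survive decoding. A sketch of the prompt shape being protected (the exact token names here are an assumption, not taken from this patch):

    # Hypothetical oasst-style prompt: the markers themselves carry structure.
    prompt = '<|prompter|>What is a llama?<|endoftext|><|assistant|>'
    # Decoding with skip_special_tokens=True would strip such markers, which is
    # why oasst-* models are decoded with skip_special_tokens=False below.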
---
modules/text_generation.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 6ee9d931..f5d2b8d0 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -37,9 +37,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
return input_ids.cuda()
def decode(output_ids):
- reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True)
- reply = reply.replace(r'<|endoftext|>', '')
- return reply
+ # Open Assistant relies on special tokens like <|endoftext|>
+ if re.match('oasst-*', shared.model_name.lower()):
+ return shared.tokenizer.decode(output_ids, skip_special_tokens=False)
+ else:
+ reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True)
+ reply = reply.replace(r'<|endoftext|>', '')
+ return reply
def generate_softprompt_input_tensors(input_ids):
inputs_embeds = shared.model.transformer.wte(input_ids)
From 77294b27ddce0c098a8f51b8cd9bd8c151a506f8 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 00:08:01 -0300
Subject: [PATCH 22/33] Use str(Path) instead of os.path.abspath(Path)
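Both spellings yield a usable path string; str(Path) simply keeps the path as written instead of resolving it against the current working directory (illustrative values, POSIX-style):

    import os
    from pathlib import Path

    p = Path('repositories/GPTQ-for-LLaMa')
    str(p)              # 'repositories/GPTQ-for-LLaMa' (kept relative)
    os.path.abspath(p)  # e.g. '/home/user/text-generation-webui/repositories/GPTQ-for-LLaMa'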
---
extensions/silero_tts/script.py | 3 +--
modules/RWKV.py | 8 ++++----
modules/quantized_LLaMA.py | 5 ++---
3 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 4a02abaa..bc660483 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -1,4 +1,3 @@
-import os
import time
from pathlib import Path
@@ -113,7 +112,7 @@ def output_modifier(string):
output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
        prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
        silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
- model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=os.path.abspath(output_file))
+ model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
autoplay = 'autoplay' if params['autoplay'] else ''
        string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
diff --git a/modules/RWKV.py b/modules/RWKV.py
index d97c1706..5cf8937a 100644
--- a/modules/RWKV.py
+++ b/modules/RWKV.py
@@ -25,10 +25,10 @@ class RWKVModel:
tokenizer_path = Path(f"{path.parent}/20B_tokenizer.json")
if shared.args.rwkv_strategy is None:
- model = RWKV(model=os.path.abspath(path), strategy=f'{device} {dtype}')
+ model = RWKV(model=str(path), strategy=f'{device} {dtype}')
else:
- model = RWKV(model=os.path.abspath(path), strategy=shared.args.rwkv_strategy)
- pipeline = PIPELINE(model, os.path.abspath(tokenizer_path))
+ model = RWKV(model=str(path), strategy=shared.args.rwkv_strategy)
+ pipeline = PIPELINE(model, str(tokenizer_path))
result = self()
result.pipeline = pipeline
@@ -61,7 +61,7 @@ class RWKVTokenizer:
@classmethod
def from_pretrained(self, path):
tokenizer_path = path / "20B_tokenizer.json"
- tokenizer = Tokenizer.from_file(os.path.abspath(tokenizer_path))
+ tokenizer = Tokenizer.from_file(str(tokenizer_path))
result = self()
result.tokenizer = tokenizer
diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py
index 5e4a38e8..fa7f15c2 100644
--- a/modules/quantized_LLaMA.py
+++ b/modules/quantized_LLaMA.py
@@ -1,4 +1,3 @@
-import os
import sys
from pathlib import Path
@@ -7,7 +6,7 @@ import torch
import modules.shared as shared
-sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
from llama import load_quant
@@ -41,7 +40,7 @@ def load_quantized_LLaMA(model_name):
print(f"Could not find {pt_model}, exiting...")
exit()
- model = load_quant(path_to_model, os.path.abspath(pt_path), bits)
+ model = load_quant(path_to_model, str(pt_path), bits)
# Multi-GPU setup
if shared.args.gpu_memory:
From 0a7acb3bd9217b8d38e35679cb3911aaa07ba864 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 00:12:21 -0300
Subject: [PATCH 23/33] Remove redundant comments
---
modules/chat.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/modules/chat.py b/modules/chat.py
index 47398afc..d78278c4 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -127,7 +127,6 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
if not regenerate:
- # Display user input and "*is typing...*" imediately
yield shared.history['visible']+[[visible_text, '*Is typing...*']]
# Generate
@@ -168,10 +167,8 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ
prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
- # Display "*is typing...*" imediately
- yield '*Is typing...*'
-
reply = ''
+ yield '*Is typing...*'
for i in range(chat_generation_attempts):
for reply in generate_reply(prompt+reply, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
reply, next_character_found = extract_message_from_reply(prompt, reply, name1, name2, check, impersonate=True)
From 2c4699a7e9a1e611052f6e5635ddb9942b26524a Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 00:20:02 -0300
Subject: [PATCH 24/33] Change a comment
---
modules/quantized_LLaMA.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py
index fa7f15c2..e9352f90 100644
--- a/modules/quantized_LLaMA.py
+++ b/modules/quantized_LLaMA.py
@@ -42,7 +42,7 @@ def load_quantized_LLaMA(model_name):
model = load_quant(path_to_model, str(pt_path), bits)
- # Multi-GPU setup
+ # Multiple GPUs or GPU+CPU
if shared.args.gpu_memory:
max_memory = {}
for i in range(len(shared.args.gpu_memory)):
From 91c2a8e88d4271991f85a61cb8721faba6a34efd Mon Sep 17 00:00:00 2001
From: stefanhamburger <9825318+stefanhamburger@users.noreply.github.com>
Date: Mon, 13 Mar 2023 07:42:09 +0100
Subject: [PATCH 25/33] Fix: tuple object does not support item assignment
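The root cause: a chat history row can arrive as a tuple, and tuples are immutable, so assigning to one element raises. The fix rebuilds the whole row as a list:

    row = ('user text', 'bot reply')  # history rows can come back as tuples
    # row[1] = 'new reply'            # TypeError: 'tuple' object does not
    #                                 # support item assignment
    row = [row[0], 'new reply']       # rebuild the row instead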
---
extensions/silero_tts/script.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index bc660483..1d068229 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -57,7 +57,7 @@ def remove_surrounded_chars(string):
def remove_tts_from_history(name1, name2):
for i, entry in enumerate(shared.history['internal']):
- shared.history['visible'][i][1] = entry[1]
+ shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
def toggle_text_in_history(name1, name2):
@@ -66,9 +66,9 @@ def toggle_text_in_history(name1, name2):
        if visible_reply.startswith('<audio'):
            if params['show_text']:
                reply = shared.history['internal'][i][1]
-                shared.history['visible'][i][1] = f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"
+                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
else:
-                shared.history['visible'][i][1] = f"{visible_reply.split('</audio>')[0]}</audio>"
+                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
def input_modifier(string):
@@ -79,7 +79,7 @@ def input_modifier(string):
# Remove autoplay from the last reply
if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
- shared.history['visible'][-1][1] = shared.history['visible'][-1][1].replace('controls autoplay>','controls>')
+ shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')]
return string
From 0c224cf4f4d9c85ecce7aaf00af0e880c46fb7ac Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 10:32:28 -0300
Subject: [PATCH 26/33] Fix GALACTICA (#285)
---
modules/text_generation.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/text_generation.py b/modules/text_generation.py
index f5d2b8d0..d64481b2 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -123,7 +123,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
original_input_ids = input_ids
output = input_ids[0]
cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
- eos_token_ids = [shared.tokenizer.eos_token_id]
+ eos_token_ids = [shared.tokenizer.eos_token_id] if shared.tokenizer.eos_token_id is not None else []
if eos_token is not None:
eos_token_ids.append(int(encode(eos_token)[0][-1]))
stopping_criteria_list = transformers.StoppingCriteriaList()
From 72757088fa6082676badf987725b27b50628a265 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 10:55:00 -0300
Subject: [PATCH 27/33] Create FUNDING.yml
---
.github/FUNDING.yml | 1 +
1 file changed, 1 insertion(+)
create mode 100644 .github/FUNDING.yml
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..57b7f698
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1 @@
+ko_fi: oobabooga
From bdff37f0bb174d05a17c02beba11ee3c6fc49453 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 11:05:51 -0300
Subject: [PATCH 28/33] Update README.md
---
README.md | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 89b567f2..ec5063b9 100644
--- a/README.md
+++ b/README.md
@@ -60,8 +60,7 @@ pip3 install torch torchvision torchaudio --extra-index-url https://download.pyt
conda install pytorch torchvision torchaudio git -c pytorch
```
-See also: [Installation instructions for human beings
-](https://github.com/oobabooga/text-generation-webui/wiki/Installation-instructions-for-human-beings)
+See also: [Installation instructions for human beings](https://github.com/oobabooga/text-generation-webui/wiki/Installation-instructions-for-human-beings).
## Installation option 2: one-click installers
From 372363bc3d5383d8351e45ee77323ba686a59769 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 12:07:02 -0300
Subject: [PATCH 29/33] Fix GPTQ load_quant call on Windows
---
modules/quantized_LLaMA.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/quantized_LLaMA.py b/modules/quantized_LLaMA.py
index e9352f90..a5757c68 100644
--- a/modules/quantized_LLaMA.py
+++ b/modules/quantized_LLaMA.py
@@ -40,7 +40,7 @@ def load_quantized_LLaMA(model_name):
print(f"Could not find {pt_model}, exiting...")
exit()
- model = load_quant(path_to_model, str(pt_path), bits)
+ model = load_quant(str(path_to_model), str(pt_path), bits)
# Multiple GPUs or GPU+CPU
if shared.args.gpu_memory:
From d97bfb871331528aa7217f65299a72baa3e64516 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 12:39:33 -0300
Subject: [PATCH 30/33] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index ec5063b9..b6cc6687 100644
--- a/README.md
+++ b/README.md
@@ -157,7 +157,7 @@ Optionally, you can use the following command-line flags:
| `--local_rank LOCAL_RANK` | DeepSpeed: Optional argument for distributed setups. |
| `--rwkv-strategy RWKV_STRATEGY` | RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8". |
| `--rwkv-cuda-on` | RWKV: Compile the CUDA kernel for better performance. |
-| `--no-stream` | Don't stream the text output in real time. This improves the text generation performance.|
+| `--no-stream` | Don't stream the text output in real time. |
| `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example. If you create a file called `settings.json`, this file will be loaded by default without the need to use the `--settings` flag.|
| `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
| `--listen` | Make the web UI reachable from your local network.|
From ddea518e0fb06ba2bd38b6d9672178ad669bda1f Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 12:43:33 -0300
Subject: [PATCH 31/33] Document --auto-launch
---
README.md | 1 +
modules/shared.py | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index b6cc6687..79a66f14 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,7 @@ Optionally, you can use the following command-line flags:
| `--listen` | Make the web UI reachable from your local network.|
| `--listen-port LISTEN_PORT` | The listening port that the server will use. |
| `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| '--auto-launch' | 'Open the web UI in the default browser upon launch' |
| `--verbose` | Print the prompts to the terminal. |
Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).
diff --git a/modules/shared.py b/modules/shared.py
index 5f6c01f3..66b00f93 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -85,12 +85,12 @@ parser.add_argument('--nvme-offload-dir', type=str, help='DeepSpeed: Directory t
parser.add_argument('--local_rank', type=int, default=0, help='DeepSpeed: Optional argument for distributed setups.')
parser.add_argument('--rwkv-strategy', type=str, default=None, help='RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".')
parser.add_argument('--rwkv-cuda-on', action='store_true', help='RWKV: Compile the CUDA kernel for better performance.')
-parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time. This improves the text generation performance.')
+parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
-parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch')
+parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
args = parser.parse_args()
From 66b6971b61c7a783be8d5416baf21e896e3e2164 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Mon, 13 Mar 2023 12:44:18 -0300
Subject: [PATCH 32/33] Update README
---
README.md | 2 +-
modules/shared.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 79a66f14..dbc8c59c 100644
--- a/README.md
+++ b/README.md
@@ -163,7 +163,7 @@ Optionally, you can use the following command-line flags:
| `--listen` | Make the web UI reachable from your local network.|
| `--listen-port LISTEN_PORT` | The listening port that the server will use. |
| `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
-| '--auto-launch' | 'Open the web UI in the default browser upon launch' |
+| `--auto-launch` | Open the web UI in the default browser upon launch. |
| `--verbose` | Print the prompts to the terminal. |
Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).
diff --git a/modules/shared.py b/modules/shared.py
index 66b00f93..8fcd4745 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -91,6 +91,6 @@ parser.add_argument('--extensions', type=str, nargs="+", help='The list of exten
parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
-parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch')
+parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
args = parser.parse_args()
From 435a69e357926d2ae10cf9285f73b52971d4b572 Mon Sep 17 00:00:00 2001
From: Luis Cosio
Date: Mon, 13 Mar 2023 11:41:35 -0600
Subject: [PATCH 33/33] Fix for issue #282
RuntimeError: Tensors must have same number of dimensions: got 3 and 4
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index b078ecf4..6d0095aa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,4 +8,4 @@ rwkv==0.3.1
safetensors==0.3.0
sentencepiece
tqdm
-git+https://github.com/zphang/transformers@llama_push
+git+https://github.com/zphang/transformers.git@68d640f7c368bcaaaecfc678f11908ebbd3d6176