diff --git a/README.md b/README.md
index 827430a7..2062be4f 100644
--- a/README.md
+++ b/README.md
@@ -94,7 +94,7 @@ Optionally, you can use the following command-line flags:
 --cpu Use the CPU to generate text.
 --auto-devices Automatically split the model across the available GPU(s) and CPU.
 --load-in-8bit Load the model with 8-bit precision.
---listen Make the webui reachable from your local network.
+--no-listen Make the webui unreachable from your local network.
 ```
 
 ## Presets
diff --git a/html_generator.py b/html_generator.py
index e6c9e85c..e679eb3b 100644
--- a/html_generator.py
+++ b/html_generator.py
@@ -20,7 +20,7 @@ def process_post(post, c):
         src = f'Anonymous No.{number}\n{src}'
     return src
 
-def generate_html(f):
+def generate_4chan_html(f):
     css = """
     #container {
         background-color: #eef2ff;
diff --git a/server.py b/server.py
index 5c994306..a3d25f33 100644
--- a/server.py
+++ b/server.py
@@ -18,7 +18,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the webui in cha
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--listen', action='store_true', help='Make the webui reachable from your local network.')
+parser.add_argument('--no-listen', action='store_true', help='Make the webui unreachable from your local network.')
 args = parser.parse_args()
 loaded_preset = None
 available_models = sorted(set(map(lambda x : str(x.name).replace('.pt', ''), list(Path('models/').glob('*'))+list(Path('torch-dumps/').glob('*')))))
@@ -63,7 +63,7 @@ def load_model(model_name):
         model = eval(command)
 
     # Loading the tokenizer
-    if model_name.lower().startswith('gpt4chan') and Path(f"models/gpt-j-6B/").exists():
+    if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path(f"models/gpt-j-6B/").exists():
         tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
     else:
         tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))
@@ -79,6 +79,7 @@ def fix_gpt4chan(s):
     s = re.sub("--- [0-9]*\n\n\n---", "---", s)
     return s
 
+# Fix the LaTeX equations in GALACTICA
 def fix_galactica(s):
     s = s.replace(r'\[', r'$')
     s = s.replace(r'\]', r'$')
@@ -87,6 +88,11 @@ def fix_galactica(s):
     s = s.replace(r'$$', r'$')
     return s
 
+def generate_html(s):
+    s = '\n'.join([f'{line}' for line in s.split('\n')])
+    s = f'