From e04ecd4bcea7b92642b2a54e29737474b57bd59b Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 15 Jan 2023 16:43:31 -0300
Subject: [PATCH] Minor improvements
---
 html_generator.py | 5 +++++
 server.py         | 9 ++-------
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/html_generator.py b/html_generator.py
index 755276de..05c63f0e 100644
--- a/html_generator.py
+++ b/html_generator.py
@@ -7,6 +7,11 @@ This is a library for formatting gpt4chan outputs as nice HTML.
 import re
 from pathlib import Path
 
+def generate_basic_html(s):
+    s = '\n'.join([f'<p>{line}</p>' for line in s.split('\n')])
+    s = f'<div>{s}</div>'
+    return s
+
 def process_post(post, c):
     t = post.split('\n')
     number = t[0].split(' ')[1]
diff --git a/server.py b/server.py
index 49f89f15..69755d85 100644
--- a/server.py
+++ b/server.py
@@ -119,11 +119,6 @@ def fix_galactica(s):
     s = s.replace(r'$$', r'$')
     return s
 
-def generate_html(s):
-    s = '\n'.join([f'<p>{line}</p>' for line in s.split('\n')])
-    s = f'<div>{s}</div>'
-    return s
-
 def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
     global model, tokenizer, model_name, loaded_preset, preset
 
@@ -157,12 +152,12 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
     reply = reply.replace(r'<|endoftext|>', '')
     if model_name.lower().startswith('galactica'):
         reply = fix_galactica(reply)
-        return reply, reply, generate_html(reply)
+        return reply, reply, generate_basic_html(reply)
     elif model_name.lower().startswith('gpt4chan'):
         reply = fix_gpt4chan(reply)
         return reply, 'Only applicable for galactica models.', generate_4chan_html(reply)
     else:
-        return reply, 'Only applicable for galactica models.', generate_html(reply)
+        return reply, 'Only applicable for galactica models.', generate_basic_html(reply)
 
 # Choosing the default model
 if args.model is not None: