Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-22 16:17:57 +01:00
Add RWKV tokenizer
This commit is contained in:
parent c855b828fe
commit e91f4bc25a
modules/RWKV.py

@@ -2,6 +2,7 @@ import os
 from pathlib import Path
 
 import numpy as np
+from tokenizers import Tokenizer
 
 import modules.shared as shared
 
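The added import is the Hugging Face tokenizers package; its Tokenizer.from_file loader is what the new class below wraps. A minimal sketch of that call on its own, assuming a 20B_tokenizer.json file is present (the path and sample text are illustrative):

    from tokenizers import Tokenizer

    # Load a tokenizer definition from its JSON file.
    tok = Tokenizer.from_file("models/20B_tokenizer.json")
    enc = tok.encode("test")  # returns an Encoding object
    print(enc.ids)            # token ids as a list of ints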
@@ -43,3 +44,22 @@ class RWKVModel:
         )
 
         return context+self.pipeline.generate(context, token_count=token_count, args=args, callback=callback)
+
+class RWKVTokenizer:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def from_pretrained(self, path):
+        tokenizer_path = path / "20B_tokenizer.json"
+        tokenizer = Tokenizer.from_file(os.path.abspath(tokenizer_path))
+
+        result = self()
+        result.tokenizer = tokenizer
+        return result
+
+    def encode(self, prompt):
+        return self.tokenizer.encode(prompt).ids
+
+    def decode(self, ids):
+        return self.tokenizer.decode(ids)
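For context, a minimal usage sketch of the class added above, assuming models/20B_tokenizer.json exists locally (the prompt is illustrative):

    from pathlib import Path

    from modules.RWKV import RWKVTokenizer

    # from_pretrained expects the directory containing 20B_tokenizer.json.
    tokenizer = RWKVTokenizer.from_pretrained(Path('models'))

    ids = tokenizer.encode("Hello, RWKV!")  # list of token ids
    text = tokenizer.decode(ids)            # decodes back to a string

Note that from_pretrained is written as a classmethod whose first parameter is named self, so self() instantiates the class; unconventional naming, but it works.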
modules/models.py

@@ -79,11 +79,12 @@ def load_model(model_name):
 
     # RMKV model (not on HuggingFace)
     elif shared.is_RWKV:
-        from modules.RWKV import RWKVModel
+        from modules.RWKV import RWKVModel, RWKVTokenizer
 
         model = RWKVModel.from_pretrained(Path(f'models/{model_name}'), dtype="fp32" if shared.args.cpu else "bf16" if shared.args.bf16 else "fp16", device="cpu" if shared.args.cpu else "cuda")
+        tokenizer = RWKVTokenizer.from_pretrained(Path('models'))
 
-        return model, None
+        return model, tokenizer
 
     # Custom
     else:
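The dtype and device arguments above fold three runtime flags into nested conditional expressions. Spelled out, the selection logic is equivalent to the following restatement (not code from the commit):

    import modules.shared as shared

    # CPU mode forces fp32 on "cpu"; otherwise bf16 or fp16 on "cuda".
    if shared.args.cpu:
        dtype, device = "fp32", "cpu"
    elif shared.args.bf16:
        dtype, device = "bf16", "cuda"
    else:
        dtype, device = "fp16", "cuda"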
modules/text_generation.py

@@ -21,12 +21,10 @@ def get_max_prompt_length(tokens):
     return max_length
 
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
-
-    # These models do not have explicit tokenizers for now, so
-    # we return an estimate for the number of tokens
     if shared.is_RWKV:
-        return np.zeros((1, len(prompt)//4))
-
-    input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
+        input_ids = shared.tokenizer.encode(str(prompt))
+        input_ids = np.array(input_ids).reshape(1, len(input_ids))
+    else:
+        input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
     if shared.args.cpu:
         return input_ids
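A hedged illustration of what this hunk changes: the old RWKV branch returned a zero array as a rough token-count estimate (about one token per four characters), while the new branch produces real token ids in the same (1, sequence_length) layout as the Hugging Face path. The prompt below is illustrative:

    import numpy as np
    from pathlib import Path

    from modules.RWKV import RWKVTokenizer

    prompt = "Hello world"

    # Old behaviour: dummy zeros, used only for its shape/length.
    estimate = np.zeros((1, len(prompt)//4))

    # New behaviour: real ids, reshaped to (1, n) so downstream code
    # can treat RWKV and HF tokenizer outputs uniformly.
    tokenizer = RWKVTokenizer.from_pretrained(Path('models'))
    ids = tokenizer.encode(prompt)
    input_ids = np.array(ids).reshape(1, len(ids))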