Move RWKV loader into a separate file

oobabooga 2023-02-27 23:50:16 -03:00
parent ebc64a408c
commit 70e522732c
3 changed files with 29 additions and 24 deletions

modules/RWKV.py (new file, 26 additions)

@@ -0,0 +1,26 @@
+import os, time, types, torch
+from pathlib import Path
+import numpy as np
+
+np.set_printoptions(precision=4, suppress=True, linewidth=200)
+os.environ['RWKV_JIT_ON'] = '1'
+os.environ["RWKV_CUDA_ON"] = '0' # '1' : use CUDA kernel for seq mode (much faster)
+
+import repositories.ChatRWKV.v2.rwkv as rwkv
+from rwkv.model import RWKV
+from rwkv.utils import PIPELINE, PIPELINE_ARGS
+
+def load_RWKV_model(path):
+    os.system("ls")
+    model = RWKV(model=path.as_posix(), strategy='cuda fp16')
+
+    out, state = model.forward([187, 510, 1563, 310, 247], None)   # use 20B_tokenizer.json
+    print(out.detach().cpu().numpy())                   # get logits
+    out, state = model.forward([187, 510], None)
+    out, state = model.forward([1563], state)           # RNN has state (use deepcopy if you want to clone it)
+    out, state = model.forward([310, 247], state)
+    print(out.detach().cpu().numpy())                   # same result as above
+
+    pipeline = PIPELINE(model, Path("repositories/ChatRWKV/20B_tokenizer.json").as_posix())
+
+    return pipeline
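For context, a minimal sketch of how this loader would be called (hypothetical usage, not part of the commit; the checkpoint path matches the one hardcoded in load_model below):

from pathlib import Path
from modules.RWKV import load_RWKV_model

# load_RWKV_model returns a rwkv.utils.PIPELINE bundling the model and the 20B tokenizer
pipeline = load_RWKV_model(Path('models/RWKV-4-Pile-169M-20220807-8023.pth'))

# PIPELINE.generate() feeds tokens through the RNN and returns the decoded continuation
print(pipeline.generate('The quick brown fox', token_count=20))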

modules/models.py

@@ -79,27 +79,9 @@ def load_model(model_name):
 
     # RMKV model (not on HuggingFace)
     elif shared.is_RWKV:
-        import types
-        np.set_printoptions(precision=4, suppress=True, linewidth=200)
+        from modules.RWKV import load_RWKV_model
 
-        os.environ['RWKV_JIT_ON'] = '1'
-        os.environ["RWKV_CUDA_ON"] = '0' # '1' : use CUDA kernel for seq mode (much faster)
-
-        from rwkv.model import RWKV
-        from rwkv.utils import PIPELINE, PIPELINE_ARGS
-
-        model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cuda fp16')
-
-        out, state = model.forward([187, 510, 1563, 310, 247], None)   # use 20B_tokenizer.json
-        print(out.detach().cpu().numpy())                   # get logits
-        out, state = model.forward([187, 510], None)
-        out, state = model.forward([1563], state)           # RNN has state (use deepcopy if you want to clone it)
-        out, state = model.forward([310, 247], state)
-        print(out.detach().cpu().numpy())                   # same result as above
-
-        pipeline = PIPELINE(model, "20B_tokenizer.json")
-
-        return pipeline, None
+        return load_RWKV_model(Path('models/RWKV-4-Pile-169M-20220807-8023.pth')), None
 
     # Custom
     else:
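load_model() returns a (model, tokenizer) pair elsewhere in this file; the RWKV pipeline carries its own tokenizer, so this branch returns None in the tokenizer slot. A sketch of the call site under that assumption (shared.* globals as used elsewhere in this codebase):

# caller side (e.g. server.py, assumed): the pipeline lands in shared.model
shared.model, shared.tokenizer = load_model(shared.model_name)
# for RWKV, shared.tokenizer stays None; tokenization happens inside the PIPELINE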

modules/text_generation.py

@@ -82,15 +82,12 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         torch.cuda.empty_cache()
 
     if shared.is_RWKV:
-        def my_print(s):
-            print(s, end='', flush=True)
         args = PIPELINE_ARGS(temperature = temperature, top_p = top_p,
                              alpha_frequency = 0.25, # Frequency Penalty (as in GPT-3)
                              alpha_presence = 0.25, # Presence Penalty (as in GPT-3)
                              token_ban = [0], # ban the generation of some tokens
                              token_stop = []) # stop generation whenever you see any token here
-        reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=my_print)
-        print(formatted_outputs(reply, None))
+        reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
         yield formatted_outputs(reply, None)
         return formatted_outputs(reply, None)
 
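The removed my_print callback shows how token streaming could be wired back in: PIPELINE.generate() invokes its callback with each decoded chunk as it is produced. A minimal sketch along those lines (hypothetical helper, not part of this commit; sampling values are illustrative):

from rwkv.utils import PIPELINE_ARGS

def stream_reply(pipeline, prompt, max_new_tokens=200):
    chunks = []
    def on_chunk(s):
        chunks.append(s)
        print(s, end='', flush=True)  # mirror each decoded chunk to stdout as it arrives
    args = PIPELINE_ARGS(temperature=1.0, top_p=0.7,
                         alpha_frequency=0.25,  # frequency penalty (as in GPT-3)
                         alpha_presence=0.25,   # presence penalty (as in GPT-3)
                         token_ban=[0],         # ban the generation of token 0
                         token_stop=[])         # stop on any of these tokens (none here)
    pipeline.generate(prompt, token_count=max_new_tokens, args=args, callback=on_chunk)
    return prompt + ''.join(chunks)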