RWKV support prototype

oobabooga 2023-02-27 23:03:35 -03:00
parent 021bd55886
commit ebc64a408c
3 changed files with 42 additions and 1 deletion

View File

@@ -38,8 +38,10 @@ def load_model(model_name):
    print(f"Loading {model_name}...")
    t0 = time.time()

    shared.is_RWKV = model_name.lower().startswith('rwkv-')

    # Default settings
    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
        if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
        else:
@@ -75,6 +77,30 @@ def load_model(model_name):
        model.module.eval()  # Inference
        print(f"DeepSpeed ZeRO-3 is enabled: {is_deepspeed_zero3_enabled()}")

    # RWKV model (not on HuggingFace)
    elif shared.is_RWKV:
        import types
        np.set_printoptions(precision=4, suppress=True, linewidth=200)

        os.environ['RWKV_JIT_ON'] = '1'
        os.environ["RWKV_CUDA_ON"] = '0'  # '1' to use the CUDA kernel for seq mode (much faster)

        from rwkv.model import RWKV
        from rwkv.utils import PIPELINE, PIPELINE_ARGS

        model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cuda fp16')

        out, state = model.forward([187, 510, 1563, 310, 247], None)  # use 20B_tokenizer.json
        print(out.detach().cpu().numpy())  # get logits
        out, state = model.forward([187, 510], None)
        out, state = model.forward([1563], state)  # RNN has state (use deepcopy if you want to clone it)
        out, state = model.forward([310, 247], state)
        print(out.detach().cpu().numpy())  # same result as above

        pipeline = PIPELINE(model, "20B_tokenizer.json")
        return pipeline, None

    # Custom
    else:
        command = "AutoModelForCausalLM.from_pretrained"
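The `strategy='cuda fp16'` string hard-coded above is what tells the rwkv package where to place the weights and at what precision. A minimal loading sketch follows; the alternative strategy values are recalled from the rwkv package's documentation and should be treated as assumptions rather than an exhaustive list, and the checkpoint path is simply the one this commit hard-codes.

```python
from rwkv.model import RWKV

# Assumed alternatives to the 'cuda fp16' strategy used in this commit
# (recalled from the rwkv package docs, not verified here):
#   'cpu fp32'    - run everything on the CPU in fp32 (no GPU required)
#   'cuda fp16'   - run everything on the GPU in fp16 (what the commit uses)
#   'cuda fp16i8' - int8-quantized weights on the GPU to save VRAM
model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cpu fp32')
```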

View File

@@ -5,6 +5,7 @@ tokenizer = None
model_name = ""
soft_prompt_tensor = None
soft_prompt = False
is_RWKV = False

# Chat variables
history = {'internal': [], 'visible': []}

View File

@@ -6,6 +6,7 @@ import numpy as np
import torch
import transformers
from tqdm import tqdm
from rwkv.utils import PIPELINE, PIPELINE_ARGS

import modules.shared as shared
from modules.extensions import apply_extensions
@@ -80,6 +81,19 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
    if not shared.args.cpu:
        torch.cuda.empty_cache()

    if shared.is_RWKV:
        def my_print(s):
            print(s, end='', flush=True)
        args = PIPELINE_ARGS(temperature=temperature, top_p=top_p,
                             alpha_frequency=0.25,  # Frequency Penalty (as in GPT-3)
                             alpha_presence=0.25,   # Presence Penalty (as in GPT-3)
                             token_ban=[0],         # ban the generation of some tokens
                             token_stop=[])         # stop generation whenever you see any token here
        reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
        print(formatted_outputs(reply, None))
        yield formatted_outputs(reply, None)
        return formatted_outputs(reply, None)

    original_question = question
    if not (shared.args.chat or shared.args.cai_chat):
        question = apply_extensions(question, "input")
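The `my_print` helper defined in this hunk is not actually used yet, since `generate()` is called with `callback=None`. Below is a minimal sketch of wiring the callback in for token-by-token streaming, assuming `load_model()` has already stored the RWKV `PIPELINE` instance in `shared.model` as in the first file above (sampling values are placeholders):

```python
import modules.shared as shared  # assumes load_model() has set shared.model to the RWKV PIPELINE
from rwkv.utils import PIPELINE_ARGS

def my_print(s):
    # Called once per decoded token piece; print without buffering.
    print(s, end='', flush=True)

args = PIPELINE_ARGS(temperature=0.7, top_p=0.9,  # placeholder sampling settings
                     alpha_frequency=0.25,         # frequency penalty (as in GPT-3)
                     alpha_presence=0.25,          # presence penalty (as in GPT-3)
                     token_ban=[0],                # never sample token 0
                     token_stop=[])                # no explicit stop tokens
reply = shared.model.generate("The quick brown fox", token_count=64, args=args, callback=my_print)
```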