From 678283ea8d0ed4f40aaf7a0e5708ccc6812fc6b4 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 17 Mar 2023 11:49:31 -0300
Subject: [PATCH] Updated Home (markdown)

---
 Home.md        |  1 -
 Using-LoRAs.md | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 1 deletion(-)
 delete mode 100644 Home.md
 create mode 100644 Using-LoRAs.md

diff --git a/Home.md b/Home.md
deleted file mode 100644
index 8448d13..0000000
--- a/Home.md
+++ /dev/null
@@ -1 +0,0 @@
-Welcome to the text-generation-webui wiki!
diff --git a/Using-LoRAs.md b/Using-LoRAs.md
new file mode 100644
index 0000000..91fdaaf
--- /dev/null
+++ b/Using-LoRAs.md
@@ -0,0 +1,90 @@
+Based on https://github.com/tloen/alpaca-lora/
+
+This is probably not the best way to do it and is still a work in progress. Suggestions are welcome.
+
+## Instructions
+1. Re-install the requirements so that the new dependencies are picked up:
+
+```
+pip install -r requirements.txt
+```
+
+2. Download the LoRA:
+
+```
+python download-model.py tloen/alpaca-lora-7b
+```
+
+3. Load llama-7b in 8-bit mode (for now, the LoRA only seems to work when the base model is loaded in 8-bit mode):
+
+```
+python server.py --model llama-7b --load-in-8bit
+```
+
+4. Select the LoRA in the Parameters tab. A sketch of roughly what these steps do under the hood can be found at the bottom of this page.
+
+## Prompt
+For this particular LoRA, the prompt must apparently be formatted like this:
+
+```
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+Write a Python script that generates text using the transformers library.
+### Response:
+```
+
+Sample output:
+
+```
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+Write a Python script that generates text using the transformers library.
+### Response:
+
+import transformers
+from transformers import AutoTokenizer, AutoModelForCausalLM
+tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+model = AutoModelForCausalLM.from_pretrained("bert-base-uncased")
+texts = ["Hello world", "How are you"]
+for sentence in texts:
+sentence = tokenizer(sentence)
+print(f"Generated {len(sentence)} tokens from '{sentence}'")
+output = model(sentences=sentence).predict()
+print(f"Predicted {len(output)} tokens for '{sentence}':\n{output}")
+```
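+
+## Under the hood
+
+For reference, below is a minimal standalone sketch of roughly what the steps above amount to when done directly with the `transformers` and `peft` libraries: the base model is loaded in 8-bit mode and the LoRA is applied on top of it. The local path `models/llama-7b`, the generation parameters, and the overall structure are illustrative assumptions, not the web UI's actual code.
+
+```
+# Standalone sketch, not the web UI's actual code.
+# Assumes transformers, peft, accelerate, and bitsandbytes are installed,
+# and that the llama-7b weights live in "models/llama-7b" (hypothetical path).
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+# Step 3: load the base model in 8-bit mode.
+tokenizer = AutoTokenizer.from_pretrained("models/llama-7b")
+model = AutoModelForCausalLM.from_pretrained(
+    "models/llama-7b",
+    load_in_8bit=True,
+    device_map="auto",
+)
+
+# Step 4: apply the LoRA on top of the 8-bit base model.
+model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b")
+
+# Build the prompt in the format this LoRA expects (see the Prompt section).
+prompt = (
+    "Below is an instruction that describes a task. "
+    "Write a response that appropriately completes the request.\n"
+    "### Instruction:\n"
+    "Write a Python script that generates text using the transformers library.\n"
+    "### Response:\n"
+)
+
+inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+output_ids = model.generate(**inputs, max_new_tokens=200)
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+```
\ No newline at end of file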