Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-26 09:40:20 +01:00.

Commit d93087adc3: Merge remote-tracking branch 'refs/remotes/origin/main'
@@ -249,8 +249,9 @@ Optionally, you can use the following command-line flags:
 | `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
 | `--n_ctx N_CTX` | Size of the prompt context. |
 | `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
-| `--n_gqa N_GQA` | grouped-query attention. Must be 8 for llama2 70b. |
-| `--rms_norm_eps RMS_NORM_EPS` | Must be 1e-5 for llama2 70b. |
+| `--n_gqa N_GQA` | grouped-query attention. Must be 8 for llama-2 70b. |
+| `--rms_norm_eps RMS_NORM_EPS` | 5e-6 is a good value for llama-2 models. |
+| `--cpu` | Use the CPU version of llama-cpp-python instead of the GPU-accelerated version. |
 
 #### AutoGPTQ
 
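Not part of the commit, but for context: a rough sketch of how the flags in this table map onto keyword arguments of llama-cpp-python's `Llama` constructor. The parameter names `n_gqa` and `rms_norm_eps` only existed in llama-cpp-python builds from this GGML-era period, and the model path is a placeholder, so treat this as an assumption-laden illustration rather than a supported recipe.

```python
# Illustrative sketch only: approximate mapping of the README flags above onto
# llama-cpp-python's Llama constructor. Parameter availability depends on the
# llama-cpp-python version; the model path is a placeholder.
from llama_cpp import Llama

model = Llama(
    model_path="llama-2-70b.ggmlv3.q4_0.bin",  # placeholder model file
    n_gpu_layers=1000000000,  # --n-gpu-layers: offload all layers to the GPU
    n_ctx=2048,               # --n_ctx: prompt context size
    seed=0,                   # --llama_cpp_seed: 0 means random
    n_gqa=8,                  # --n_gqa: required for llama-2 70b (GGML-era builds only)
    rms_norm_eps=5e-6,        # --rms_norm_eps (GGML-era builds only)
)
```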
@@ -10,13 +10,22 @@ from transformers.modeling_outputs import CausalLMOutputWithPast
 from modules import shared
 from modules.logging_colors import logger
 
+import llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
-        from llama_cpp_cuda import Llama
+        import llama_cpp_cuda
     except:
-        from llama_cpp import Llama
+        llama_cpp_cuda = None
 else:
-    from llama_cpp import Llama
+    llama_cpp_cuda = None
+
+
+def llama_cpp_lib():
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp
+    else:
+        return llama_cpp_cuda
 
 
 class LlamacppHF(PreTrainedModel):
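The effect of this hunk is that neither class is hard-imported any more: `llama_cpp` is always imported, `llama_cpp_cuda` is attempted and set to `None` if unavailable, and `llama_cpp_lib()` chooses between the two modules at call time, after `--cpu` has been parsed. A standalone sketch of the same fallback pattern follows; `pick_llama_cpp_module` is an illustrative helper, not code from the repo.

```python
# Standalone sketch of the selection pattern above. The module names
# (llama_cpp, llama_cpp_cuda) come from the diff; the helper itself is
# illustrative and not part of the commit.
import importlib


def pick_llama_cpp_module(force_cpu: bool):
    """Return the CUDA build of llama-cpp-python if available, else the CPU build."""
    try:
        cuda_mod = importlib.import_module("llama_cpp_cuda")
    except ImportError:
        cuda_mod = None

    cpu_mod = importlib.import_module("llama_cpp")
    return cpu_mod if (force_cpu or cuda_mod is None) else cuda_mod


# Usage: classes are looked up lazily on whichever module was selected.
lib = pick_llama_cpp_module(force_cpu=False)
Llama = lib.Llama
```

One design note: the repo's bare `except:` silences every failure when importing the CUDA wheel, which keeps startup robust but can hide unrelated import errors; the sketch narrows it to `ImportError`.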
@@ -111,5 +120,7 @@ class LlamacppHF(PreTrainedModel):
             'logits_all': True,
         }
 
+        Llama = llama_cpp_lib().Llama
         model = Llama(**params)
+
         return LlamacppHF(model)
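Here the class is resolved through `llama_cpp_lib()` inside `from_pretrained` rather than at import time. The full `params` dict is not shown in this hunk; only `'logits_all': True` is visible, which asks llama-cpp-python to keep logits for every token rather than just the last one. A hedged, self-contained sketch of the instantiation step is below; the model path and dict contents are placeholders, and the class is imported directly instead of via the selector.

```python
# Hedged sketch, not the repo's actual params dict (only 'logits_all' appears in
# the hunk; the model path is a placeholder). In the repo the class is resolved
# via llama_cpp_lib(); here the CPU build is imported directly for simplicity.
from llama_cpp import Llama

params = {
    'model_path': 'path/to/model.bin',  # placeholder path
    'logits_all': True,                 # keep logits for every token, as in the hunk
}
model = Llama(**params)
```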
@@ -7,13 +7,22 @@ from modules import shared
 from modules.callbacks import Iteratorize
 from modules.logging_colors import logger
 
+import llama_cpp
+
 if torch.cuda.is_available() and not torch.version.hip:
     try:
-        from llama_cpp_cuda import Llama, LlamaCache, LogitsProcessorList
+        import llama_cpp_cuda
     except:
-        from llama_cpp import Llama, LlamaCache, LogitsProcessorList
+        llama_cpp_cuda = None
 else:
-    from llama_cpp import Llama, LlamaCache, LogitsProcessorList
+    llama_cpp_cuda = None
+
+
+def llama_cpp_lib():
+    if shared.args.cpu or llama_cpp_cuda is None:
+        return llama_cpp
+    else:
+        return llama_cpp_cuda
 
 
 def ban_eos_logits_processor(eos_token, input_ids, logits):
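The same selector is duplicated in this second module. Worth noting is the guard `torch.cuda.is_available() and not torch.version.hip`: `torch.version.hip` is `None` on CUDA and CPU builds of PyTorch and a version string on ROCm builds, so the CUDA-specific wheel is only attempted on genuine NVIDIA setups. A tiny illustrative reproduction of just that check:

```python
# Illustrative only: reproduce the guard used above to decide whether the
# CUDA-specific llama-cpp-python wheel should even be attempted.
import torch


def should_try_cuda_wheel() -> bool:
    # torch.version.hip is None on CUDA/CPU builds and a version string on ROCm,
    # so the CUDA wheel is only tried on NVIDIA setups.
    return torch.cuda.is_available() and not torch.version.hip


print(should_try_cuda_wheel())
```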
@@ -30,6 +39,10 @@ class LlamaCppModel:
 
     @classmethod
     def from_pretrained(self, path):
+
+        Llama = llama_cpp_lib().Llama
+        LlamaCache = llama_cpp_lib().LlamaCache
+
         result = self()
         cache_capacity = 0
         if shared.args.cache_capacity is not None:
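For context on the `LlamaCache` lookup and the `cache_capacity` lines visible here: `LlamaCache` is llama-cpp-python's prompt cache, which the webui attaches when `--cache-capacity` is set. A hedged, self-contained sketch of that idea follows; the model path and capacity value are placeholders, and the classes are imported directly rather than through `llama_cpp_lib()`.

```python
# Hedged sketch: attach a prompt cache to a llama-cpp-python model. The path and
# capacity are placeholders; the webui derives the capacity from --cache-capacity
# rather than hard-coding it.
from llama_cpp import Llama, LlamaCache

model = Llama(model_path='path/to/model.bin', n_ctx=2048)  # placeholder path
cache_capacity = 2 * 1024 ** 3                              # 2 GiB, placeholder
if cache_capacity > 0:
    model.set_cache(LlamaCache(capacity_bytes=cache_capacity))
```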
@@ -74,6 +87,9 @@ class LlamaCppModel:
         return self.model.detokenize(tokens)
 
     def generate(self, prompt, state, callback=None):
+
+        LogitsProcessorList = llama_cpp_lib().LogitsProcessorList
+
         prompt = prompt if type(prompt) is str else prompt.decode()
         completion_chunks = self.model.create_completion(
             prompt=prompt,
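The `ban_eos_logits_processor` named in the earlier hunk is a logits processor in llama-cpp-python's sense: a callable that receives token ids and logits and returns modified logits, wrapped in a `LogitsProcessorList` and passed to `create_completion`. A hedged sketch of that mechanism is below; the model path is a placeholder and the processor is a simplified stand-in, not the repo's exact function.

```python
# Hedged sketch of the logits-processor mechanism used by generate(). The model
# path is a placeholder and ban_eos is a simplified stand-in for the repo's
# ban_eos_logits_processor.
from llama_cpp import Llama, LogitsProcessorList

model = Llama(model_path='path/to/model.bin', n_ctx=2048)  # placeholder path


def ban_eos(input_ids, logits):
    logits[model.token_eos()] = -float('inf')  # make the EOS token unselectable
    return logits


completion = model.create_completion(
    prompt='Hello',
    max_tokens=16,
    logits_processor=LogitsProcessorList([ban_eos]),
)
```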
@@ -41,6 +41,7 @@ loaders_and_params = {
         'llama_cpp_seed',
         'compress_pos_emb',
         'alpha_value',
+        'cpu',
     ],
     'llamacpp_HF': [
         'n_ctx',
@@ -55,6 +56,7 @@ loaders_and_params = {
         'llama_cpp_seed',
         'compress_pos_emb',
         'alpha_value',
+        'cpu',
         'llamacpp_HF_info',
     ],
     'Transformers': [
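These two hunks edit `loaders_and_params`, which maps each model loader to the settings exposed for it in the UI, so adding `'cpu'` is what surfaces the new toggle for both llama.cpp-based loaders. Below is a trimmed, illustrative reconstruction of the shape of that dict: only entries visible in the diff are included, the real lists are longer, and the `'llama.cpp'` key name is an assumption since it does not appear in the hunks.

```python
# Illustrative, abbreviated reconstruction of the mapping these hunks edit.
# Only entries visible in the diff are listed; the actual dict contains more
# loaders and more settings per loader.
loaders_and_params = {
    'llama.cpp': [            # key name assumed; not shown in the hunks
        'llama_cpp_seed',
        'compress_pos_emb',
        'alpha_value',
        'cpu',                # newly added: expose the --cpu toggle for this loader
    ],
    'llamacpp_HF': [
        'n_ctx',
        'llama_cpp_seed',
        'compress_pos_emb',
        'alpha_value',
        'cpu',                # newly added here as well
        'llamacpp_HF_info',
    ],
}
```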
@@ -132,8 +132,8 @@ parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity.
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
-parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama2 70b.')
-parser.add_argument('--rms_norm_eps', type=float, default=0, help='Must be 1e-5 for llama2 70b.')
+parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama-2 70b.')
+parser.add_argument('--rms_norm_eps', type=float, default=0, help='5e-6 is a good value for llama-2 models.')
 
 # GPTQ
 parser.add_argument('--wbits', type=int, default=0, help='Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
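Finally, a self-contained sketch of how these argparse definitions tie back to the `llama_cpp_lib()` selector added in the Python hunks: once parsed, `args.cpu` is what the selector checks via `shared.args.cpu`. The parser below is a stripped-down stand-in, not the repo's shared.py, and the `--cpu` help text is taken from the README hunk above.

```python
# Stripped-down stand-in for the repo's argparse setup, showing how args.cpu
# (plus the flags edited above) would reach the rest of the code.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true', help='Use the CPU version of llama-cpp-python instead of the GPU-accelerated version.')
parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama-2 70b.')
parser.add_argument('--rms_norm_eps', type=float, default=0, help='5e-6 is a good value for llama-2 models.')

args = parser.parse_args(['--cpu', '--n_gqa', '8'])
print(args.cpu, args.n_gqa, args.rms_norm_eps)  # True 8 0.0
```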