Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-25 09:19:23 +01:00
Reorder imports
commit 0bec15ebcd
parent 41ec682834
@@ -5,14 +5,15 @@ from pathlib import Path
 import accelerate
 import torch
 import transformers
 from transformers import AutoConfig, AutoModelForCausalLM
 
 import modules.shared as shared
 
 sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
 import llama_inference_offload
-from quant import make_quant
 from modelutils import find_layers
+from quant import make_quant
+
 
 def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exclude_layers=['lm_head'], kernel_switch_threshold=128):
     config = AutoConfig.from_pretrained(model)
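
For context, this is how the import block of the affected file reads after the commit, reconstructed only from the hunk above: the GPTQ-for-LLaMa imports are alphabetized after the sys.path insertion, and a second blank line is added before the function definition. The two lines above the hunk are assumptions, not part of this change: from pathlib import Path is visible in the hunk header context, and import sys is inferred from the sys.path.insert call.

# Assumed to sit above the hunk (the hunk itself starts at "import accelerate"):
import sys                 # assumed: required by sys.path.insert below
from pathlib import Path   # shown in the hunk header context

import accelerate
import torch
import transformers
from transformers import AutoConfig, AutoModelForCausalLM

import modules.shared as shared

# Make the local GPTQ-for-LLaMa checkout importable, then import from it
# in alphabetical order (the reordering introduced by this commit).
sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
import llama_inference_offload
from modelutils import find_layers
from quant import make_quant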