Don't download a model during installation

And some other updates/minor improvements
oobabooga 2023-06-01 01:20:56 -03:00
parent 2e53caa806
commit 290a3374e4
5 changed files with 42 additions and 45 deletions

View File

@@ -8,24 +8,21 @@ everything for you.
 To launch the web UI in the future after it is already installed, run
 this same "start" script.
 
-# Updating
+# Updating the web UI
 
 Run the "update" script. This will only install the updates, so it should
 be much faster than the initial installation.
 
-May need to delete the 'text-generation-webui\repositories\GPTQ-for-LLaMa'
-folder if GPTQ-for-LLaMa needs to be updated.
-
 # Adding flags like --chat, --notebook, etc
 
 Edit the "webui.py" script using a text editor and add the desired flags
 to the CMD_FLAGS variable at the top. It should look like this:
 
-CMD_FLAGS = '--chat --model-menu'
+CMD_FLAGS = '--chat'
 
-For instance, to add the --notebook flag, change it to
+For instance, to add the --api flag, change it to
 
-CMD_FLAGS = '--notebook --model-menu'
+CMD_FLAGS = '--chat --api'
 
 # Running an interactive shell

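Note: after the edit described above, the top of webui.py would read, for example:

    CMD_FLAGS = '--chat --api'

As the webui.py diff further down shows, the same flags can also be supplied without editing the file, by setting the OOBABOOGA_FLAGS environment variable before running the start script.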
View File

@@ -62,6 +62,3 @@ conda activate "$INSTALL_ENV_DIR"
 
 # setup installer env
 python webui.py
-
-echo
-echo "Done!"

View File

@@ -62,6 +62,3 @@ conda activate "$INSTALL_ENV_DIR"
 
 # setup installer env
 python webui.py
-
-echo
-echo "Done!"

View File

@@ -59,8 +59,5 @@ call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || (
 
 @rem setup installer env
 call python webui.py
-echo.
-echo Done!
-
 :end
 pause

View File

@@ -11,15 +11,26 @@ conda_env_path = os.path.join(script_dir, "installer_files", "env")
 
 # Use this to set your command-line flags. For the full list, see:
 # https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui
-CMD_FLAGS = '--chat --model-menu'
+CMD_FLAGS = '--chat'
 
 # Allows users to set flags in "OOBABOOGA_FLAGS" environment variable
 if "OOBABOOGA_FLAGS" in os.environ:
     CMD_FLAGS = os.environ["OOBABOOGA_FLAGS"]
-    print("\33[1;32mFlags have been taken from enivroment Variable 'OOBABOOGA_FLAGS'\33[0m")
+    print("The following flags have been taken from the environment variable 'OOBABOOGA_FLAGS':")
     print(CMD_FLAGS)
-    print("\33[1;32mTo use flags from webui.py remove 'OOBABOOGA_FLAGS'\33[0m")
+    print("To use the CMD_FLAGS inside webui.py, unset 'OOBABOOGA_FLAGS'.\n")
+
+
+def print_big_message(message):
+    message = message.strip()
+    lines = message.split('\n')
+    print("\n\n*******************************************************************")
+    for line in lines:
+        if line.strip() != '':
+            print("*", line)
+    print("*******************************************************************\n\n")
 
 
 def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
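Note: as a quick illustration (not part of the commit), the new print_big_message helper boxes a multi-line warning, skipping blank lines and prefixing each remaining line with "* ":

    print_big_message("WARNING: compilation failed\nBut it can be ignored.")

    # Prints (with surrounding blank lines):
    # *******************************************************************
    # * WARNING: compilation failed
    # * But it can be ignored.
    # *******************************************************************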
@@ -39,6 +50,7 @@ def run_cmd(cmd, assert_success=False, environment=False, capture_output=False,
 
     if assert_success and result.returncode != 0:
         print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...")
         sys.exit()
+
     return result
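Note: for context, call sites elsewhere in this file consume the returned subprocess result; a sketch modeled on the bitsandbytes fallback below (the package name is illustrative):

    ok = run_cmd("python -m pip install some-package", environment=True).returncode == 0

Passing assert_success=True instead makes run_cmd terminate the installer on failure.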
@@ -113,12 +125,14 @@ def update_dependencies():
         old_bnb = "bitsandbytes==0.38.1" if not sys.platform.startswith("win") else "https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.38.1-py3-none-any.whl"
         if compute_array.returncode == 0 and not any(int(compute) >= min_compute for compute in compute_array.stdout.decode('utf-8').split(',')):
             old_bnb_install = run_cmd(f"python -m pip install {old_bnb} --force-reinstall --no-deps", environment=True).returncode == 0
-            print("\n\nWARNING: GPU with compute < 7.0 detected!")
+            message = "\n\nWARNING: GPU with compute < 7.0 detected!\n"
+
             if old_bnb_install:
-                print("Older version of bitsandbytes has been installed to maintain compatibility.")
-                print("You will be unable to use --load-in-4bit!\n\n")
+                message += "Older version of bitsandbytes has been installed to maintain compatibility.\n"
+                message += "You will be unable to use --load-in-4bit!\n"
             else:
-                print("You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n\n")
+                message += "You will be unable to use --load-in-8bit until you install bitsandbytes 0.38.1!\n"
+
+            print_big_message(message)
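Note: a standalone sketch of the guard condition above, with hypothetical values (min_compute is defined earlier in webui.py and not shown in this hunk; compute capabilities are encoded as integers, e.g. 61 for 6.1):

    min_compute = 70                  # hypothetical threshold for compute 7.0
    stdout = b"61,52"                 # e.g. two GPUs with compute 6.1 and 5.2
    if not any(int(c) >= min_compute for c in stdout.decode('utf-8').split(',')):
        print("No detected GPU meets the minimum compute capability")

The fallback therefore triggers only when the query succeeded and every detected GPU is below the threshold.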
# The following dependencies are for CUDA, not CPU # The following dependencies are for CUDA, not CPU
# Check if the package cpuonly exists to determine if torch uses CUDA or not # Check if the package cpuonly exists to determine if torch uses CUDA or not
@@ -174,11 +188,7 @@ def update_dependencies():
     if not glob.glob(quant_cuda_path_regex):
         # Attempt installation via alternative, Windows/Linux-specific method
         if sys.platform.startswith("win") or sys.platform.startswith("linux"):
-            print("\n\n*******************************************************************")
-            print("* WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!")
-            print("* The installer will proceed to install a pre-compiled wheel.")
-            print("*******************************************************************\n\n")
+            print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.")
 
             url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl"
             if sys.platform.startswith("linux"):
                 url = "https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl"
@@ -200,7 +210,7 @@ def download_model():
     run_cmd("python download-model.py", environment=True)
 
 
-def run_model():
+def launch_webui():
     os.chdir("text-generation-webui")
     run_cmd(f"python server.py {CMD_FLAGS}", environment=True)
@@ -223,13 +233,12 @@ if __name__ == "__main__":
 
     # Check if a model has been downloaded yet
     if len(glob.glob("text-generation-webui/models/*/")) == 0:
-        download_model()
-        os.chdir(script_dir)
+        print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.")
 
     # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
     conda_path_bin = os.path.join(conda_env_path, "bin")
     if not os.path.exists(conda_path_bin):
         os.mkdir(conda_path_bin)
 
-    # Run the model with webui
-    run_model()
+    # Launch the webui
+    launch_webui()
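Note: the model check above relies on a glob detail: a pattern ending in "/" matches directories only, so stray files inside models/ do not count as a downloaded model. A standalone sketch:

    import glob
    # Trailing slash => directories only (e.g. models/llama-7b/), not loose files
    model_dirs = glob.glob("text-generation-webui/models/*/")
    print(len(model_dirs) == 0)  # True while no model folder exists yet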