import argparse
import glob
import re
import os
import site
import subprocess
import sys

script_dir = os.getcwd()
conda_env_path = os.path.join(script_dir, "installer_files", "env")

# Use this to set your command-line flags. For the full list, see:
# https://github.com/oobabooga/text-generation-webui/#starting-the-web-ui
# Example: CMD_FLAGS = '--chat --listen'
CMD_FLAGS = '--chat'

# Allows users to set flags in "OOBABOOGA_FLAGS" environment variable
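# e.g. (assumed shell usage): OOBABOOGA_FLAGS="--chat --listen" python webui.py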
if " OOBABOOGA_FLAGS " in os . environ :
CMD_FLAGS = os . environ [ " OOBABOOGA_FLAGS " ]
2023-06-01 06:20:56 +02:00
print ( " The following flags have been taken from the environment variable ' OOBABOOGA_FLAGS ' : " )
2023-05-31 19:43:22 +02:00
print ( CMD_FLAGS )
2023-06-01 06:20:56 +02:00
print ( " To use the CMD_FLAGS Inside webui.py, unset ' OOBABOOGA_FLAGS ' . \n " )
2023-07-26 22:33:02 +02:00
# Remove the '# ' from the following lines as needed for your AMD GPU on Linux
# os.environ["ROCM_PATH"] = '/opt/rocm'
# os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0'
# os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030'


def print_big_message(message):
    message = message.strip()
    lines = message.split('\n')
    print("\n\n*******************************************************************")
    for line in lines:
        if line.strip() != '':
            print("*", line)

    print("*******************************************************************\n\n")


def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    # Use the conda environment
    if environment:
        if sys.platform.startswith("win"):
            conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat")
            cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd
        else:
            conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh")
            cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd

    # Run shell commands
    result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)

    # Assert the command ran successfully
    if assert_success and result.returncode != 0:
        print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...")
        sys.exit()

    return result


def check_env():
    # If we have access to conda, we are probably in an environment
    conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
    if not conda_exist:
        print("Conda is not installed. Exiting...")
        sys.exit()

    # Ensure this is a new environment and not the base environment
    if os.environ["CONDA_DEFAULT_ENV"] == "base":
        print("Create an environment for this project and activate it. Exiting...")
        sys.exit()


def install_dependencies():
    # Select your GPU, or choose to run in CPU mode
    print("What is your GPU?")
    print()
    print("A) NVIDIA")
    print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.4.2/5.4.3 on Linux)")
    print("C) Apple M Series")
    print("D) None (I want to run in CPU mode)")
    print()
    gpuchoice = input("Input> ").lower()

    if gpuchoice == "d":
        print_big_message("Once the installation ends, make sure to open webui.py with a text editor\nand add the --cpu flag to CMD_FLAGS.")

    # Install the version of PyTorch needed
    if gpuchoice == "a":
        run_cmd('conda install -y -k cuda ninja git -c nvidia/label/cuda-11.7.0 -c nvidia && python -m pip install torch==2.0.1+cu117 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117', assert_success=True, environment=True)
    elif gpuchoice == "b" and not sys.platform.startswith("darwin"):
        if sys.platform.startswith("linux"):
            run_cmd('conda install -y -k ninja git && python -m pip install torch==2.0.1+rocm5.4.2 torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.4.2', assert_success=True, environment=True)
        else:
            print("AMD GPUs are only supported on Linux. Exiting...")
            sys.exit()
    elif (gpuchoice == "c" or gpuchoice == "b") and sys.platform.startswith("darwin"):
        run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True)
    elif gpuchoice == "d" or gpuchoice == "c":
        if sys.platform.startswith("linux"):
            run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu", assert_success=True, environment=True)
        else:
            run_cmd("conda install -y -k ninja git && python -m pip install torch torchvision torchaudio", assert_success=True, environment=True)
    else:
        print("Invalid choice. Exiting...")
        sys.exit()

    # Clone webui to our computer
    run_cmd("git clone https://github.com/oobabooga/text-generation-webui.git", assert_success=True, environment=True)

    # Install the webui dependencies
    update_dependencies()


def update_dependencies():
    os.chdir("text-generation-webui")
    run_cmd("git pull", assert_success=True, environment=True)

    # Workaround for git+ packages not updating properly. Also store requirements.txt for later use.
    with open("requirements.txt") as f:
        textgen_requirements = f.read()
    git_requirements = [req for req in textgen_requirements.splitlines() if req.startswith("git+")]

    # Loop through each "git+" requirement and uninstall it
    for req in git_requirements:
        # Extract the package name from the "git+" requirement
        url = req.replace("git+", "")
        package_name = url.split("/")[-1].split("@")[0]
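        # For illustration, a requirement like "git+https://github.com/org/repo@v1" (assumed
        # format) yields url "https://github.com/org/repo@v1" and package_name "repo"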

        # Uninstall the package using pip
        run_cmd("python -m pip uninstall -y " + package_name, environment=True)
        print(f"Uninstalled {package_name}")

    # Install/update the dependencies from all requirements.txt files
    run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True)

    extensions = next(os.walk("extensions"))[1]
    for extension in extensions:
        if extension in ['superbooga']:  # No wheels available for dependencies
            continue

        extension_req_path = os.path.join("extensions", extension, "requirements.txt")
        if os.path.exists(extension_req_path):
            run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True)

    # The following dependencies are for CUDA, not CPU
    # Parse output of 'pip show torch' to determine torch version
    torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True)
    torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0]
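    # torver should look like "2.0.1+cu117", "2.0.1+rocm5.4.2", or "2.0.1" for a CPU-only build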

    # Check for '+cu' or '+rocm' in the version string to determine whether torch uses CUDA or ROCm.
    # Check for pytorch-cuda as well for backwards compatibility.
    if '+cu' not in torver and '+rocm' not in torver and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
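        # grep exits with 1 when pytorch-cuda is absent, i.e. torch is CPU-only and
        # the CUDA/ROCm-specific steps below can be skipped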
        return

    # Get the GPU's CUDA compute capability
    if '+cu' in torver:
        nvcc_device_query = "__nvcc_device_query" if not sys.platform.startswith("win") else "__nvcc_device_query.exe"
        compute_array = run_cmd(os.path.join(conda_env_path, "bin", nvcc_device_query), environment=True, capture_output=True)
    else:
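        # Dummy stand-in for a failed subprocess result, so the .returncode and
        # .stdout checks below behave consistently on non-CUDA builds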
        compute_array = type('obj', (object,), {'stdout': b'', 'returncode': 1})

    # Fix a bitsandbytes compatibility issue with Linux
    # if sys.platform.startswith("linux"):
    #     shutil.copy(os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cuda117.so"), os.path.join(site_packages_path, "bitsandbytes", "libbitsandbytes_cpu.so"))

    if not os.path.exists("repositories/"):
        os.mkdir("repositories")

    os.chdir("repositories")

    # Install or update exllama as needed
    if not os.path.exists("exllama/"):
        run_cmd("git clone https://github.com/turboderp/exllama.git", environment=True)
    else:
        os.chdir("exllama")
        run_cmd("git pull", environment=True)
        os.chdir("..")

    # The pre-installed exllama module does not support AMD GPUs
    if '+rocm' in torver:
        run_cmd("python -m pip uninstall -y exllama", environment=True)
        # Get the download URL for the latest exllama ROCm wheel
        exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
        if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm:
            run_cmd("python -m pip install " + exllama_rocm, environment=True)

    # Fix build issue with exllama in Linux/WSL
    if sys.platform.startswith("linux") and not os.path.exists(f"{conda_env_path}/lib64"):
        run_cmd(f'ln -s "{conda_env_path}/lib" "{conda_env_path}/lib64"', environment=True)

    # The oobabooga fork requires a minimum compute capability of 6.0
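    # __nvcc_device_query is assumed to print the supported compute capabilities as
    # comma-separated integers (e.g. "75,86"), which is why 6.0 is expressed as 60 below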
    gptq_min_compute = 60
    gptq_min_compute_check = any(int(compute) >= gptq_min_compute for compute in compute_array.stdout.decode('utf-8').split(',')) if compute_array.returncode == 0 else False

    # Install GPTQ-for-LLaMa, which enables 4-bit CUDA quantization
    if not os.path.exists("GPTQ-for-LLaMa/"):
        # Install the oobabooga fork if the minimum compute is met, or if the check failed
        if '+rocm' in torver:
            run_cmd("git clone https://github.com/WapaMario63/GPTQ-for-LLaMa-ROCm.git GPTQ-for-LLaMa -b rocm", assert_success=True, environment=True)
        elif gptq_min_compute_check or compute_array.returncode != 0:
            run_cmd("git clone https://github.com/oobabooga/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True)
        else:
            run_cmd("git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda", assert_success=True, environment=True)

    # On some Linux distributions, g++ may not exist or be the wrong version to compile GPTQ-for-LLaMa
    if sys.platform.startswith("linux"):
        gxx_output = run_cmd("g++ -dumpfullversion -dumpversion", environment=True, capture_output=True)
        if gxx_output.returncode != 0 or int(gxx_output.stdout.strip().split(b".")[0]) > 11:
            # Install the correct version of g++
            run_cmd("conda install -y -k gxx_linux-64=11.2.0 -c conda-forge", environment=True)

    # Install/Update ROCm AutoGPTQ for AMD GPUs
    if '+rocm' in torver:
        if run_cmd("[ -d ./AutoGPTQ-rocm ] && rm -rfd ./AutoGPTQ-rocm; git clone https://github.com/jllllll/AutoGPTQ.git ./AutoGPTQ-rocm -b rocm && cp ./AutoGPTQ-rocm/setup_rocm.py ./AutoGPTQ-rocm/setup.py && python -m pip install ./AutoGPTQ-rocm --force-reinstall --no-deps", environment=True).returncode != 0:
            print_big_message("WARNING: AutoGPTQ kernel compilation failed!\nThe installer will proceed to install a pre-compiled wheel.")
            if run_cmd("python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/Linux-x64/ROCm-5.4.2/auto_gptq-0.3.2%2Brocm5.4.2-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps", environment=True).returncode != 0:
                print_big_message("ERROR: AutoGPTQ wheel installation failed!\nYou will not be able to use GPTQ-based models with AutoGPTQ.")

    # Install GPTQ-for-LLaMa dependencies
    os.chdir("GPTQ-for-LLaMa")
    run_cmd("git pull", environment=True)

    # Find the path to your dependencies
    site_packages_path = None
    for sitedir in site.getsitepackages():
        if "site-packages" in sitedir:
            site_packages_path = sitedir
            break

    # This path is critical to installing the following dependencies
    if site_packages_path is None:
        print("Could not find the path to your Python packages. Exiting...")
        sys.exit()

    # Compile and install GPTQ-for-LLaMa
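    # pip builds from setup.py, so the platform-specific setup script
    # (setup_rocm.py or setup_cuda.py) is renamed into place first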
    if '+rocm' in torver:
        if os.path.exists('setup_rocm.py'):
            os.replace("setup_rocm.py", "setup.py")
    elif os.path.exists('setup_cuda.py'):
        os.rename("setup_cuda.py", "setup.py")

    build_gptq = run_cmd("python -m pip install .", environment=True).returncode == 0

    # Wheel installation can fail while in the build directory of a package with the same name
    os.chdir("..")

    # If the path does not exist or the command's return code is not 0, the install
    # failed, or the package may have been installed outside the environment
    quant_cuda_path_regex = os.path.join(site_packages_path, "quant_cuda*/")
    quant_cuda_path = glob.glob(quant_cuda_path_regex)
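    # quant_cuda is the compiled extension that GPTQ-for-LLaMa installs, so its
    # presence in site-packages serves as evidence of a successful build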
    if not build_gptq:
        # Attempt installation via an alternative, Windows/Linux-specific method
        if (sys.platform.startswith("win") or sys.platform.startswith("linux")) and not quant_cuda_path:
            print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nThe installer will proceed to install a pre-compiled wheel.")
            if '+rocm' in torver:
                wheel = 'ROCm-5.4.2/quant_cuda-0.0.0-cp310-cp310-linux_x86_64.whl'
            else:
                wheel = f"{'' if gptq_min_compute_check or compute_array.returncode != 0 else '832e220d6dbf11bec5eaa8b221a52c1c854d2a25/'}quant_cuda-0.0.0-cp310-cp310-{'linux_x86_64' if sys.platform.startswith('linux') else 'win_amd64'}.whl"

            url = f"https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/{'Linux-x64' if sys.platform.startswith('linux') else 'main'}/" + wheel
            result = run_cmd("python -m pip install " + url, environment=True)
            if result.returncode == 0 and glob.glob(quant_cuda_path_regex):
                print("Wheel installation success!")
            else:
                print("ERROR: GPTQ wheel installation failed. You will not be able to use GPTQ-based models.")
        elif quant_cuda_path:
            print_big_message("WARNING: GPTQ-for-LLaMa compilation failed, but this is FINE and can be ignored!\nquant_cuda has already been installed.")
        else:
            print("ERROR: GPTQ CUDA kernel compilation failed.")
            print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.")

        print("Continuing with install...")


def download_model():
    os.chdir("text-generation-webui")
    run_cmd("python download-model.py", environment=True)


def launch_webui():
    os.chdir("text-generation-webui")
    run_cmd(f"python server.py {CMD_FLAGS}", environment=True)


if __name__ == "__main__":
    # Verify we are in a conda environment
    check_env()

    parser = argparse.ArgumentParser()
    parser.add_argument('--update', action='store_true', help='Update the web UI.')
    args = parser.parse_args()

    if args.update:
        update_dependencies()
    else:
        # If the webui has already been installed, skip installation and run it
        if not os.path.exists("text-generation-webui/"):
            install_dependencies()
            os.chdir(script_dir)

        # Check if a model has been downloaded yet
        if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
            print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.")

        # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
        conda_path_bin = os.path.join(conda_env_path, "bin")
        if not os.path.exists(conda_path_bin):
            os.mkdir(conda_path_bin)

        # Launch the webui
        launch_webui()