Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-10-31 22:50:15 +01:00.
0b4ee14edc
Have pip directly download and install backup GPTQ wheel instead of first downloading through curl. Install bitsandbytes from wheel compiled for Windows from modified source. Add clarification of minor, intermittent issue to instructions. Add system32 folder to end of PATH rather than beginning. Add warning when installed under a path containing spaces.
109 lines
4.4 KiB
Batchfile
109 lines
4.4 KiB
Batchfile
@echo off

@rem Based on the installer found here: https://github.com/Sygil-Dev/sygil-webui
@rem This script will install git and all dependencies
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem This enables a user to install this project without manually installing conda and git.

@rem Warn (but continue) when the current directory contains a space, since
@rem Micromamba is known to misbehave under such paths.
@rem NOTE(review): this line arrived truncated to a bare "&& echo WARNING ..." in
@rem this copy; the leading 'echo "%CD%"| findstr' test is reconstructed from the
@rem upstream script - confirm against the original repository.
echo "%CD%"| findstr /C:" " >nul && echo WARNING: This script relies on Micromamba which may have issues on some systems when installed under a path with spaces.&& echo.
|
@rem Prompt for the target hardware; only the first character of the reply is kept,
@rem so "A", "a" and "Apple" are all treated the same way.
echo What is your GPU?
echo.
echo A) NVIDIA
echo B) None (I want to run in CPU mode)
echo.
set /p "gpuchoice=Input> "
set gpuchoice=%gpuchoice:~0,1%

@rem Select the conda packages and channels for the chosen hardware
@rem (comparison is case-insensitive via /I); any other answer aborts.
if /I "%gpuchoice%" == "A" (
	set "PACKAGES_TO_INSTALL=python=3.10.9 torchvision torchaudio pytorch-cuda=11.7 cuda-toolkit conda-forge::ninja conda-forge::git"
	set "CHANNEL=-c pytorch -c nvidia/label/cuda-11.7.0 -c nvidia"
) else if /I "%gpuchoice%" == "B" (
	set "PACKAGES_TO_INSTALL=pytorch torchvision torchaudio cpuonly git"
	set "CHANNEL=-c conda-forge -c pytorch"
) else (
	echo Invalid choice. Exiting...
	exit
)
|
|
|
|
@rem Work relative to this script's own directory, regardless of where it was launched.
cd /D "%~dp0"

@rem system32 is appended to the END of PATH so it cannot shadow env binaries.
set PATH=%PATH%;%SystemRoot%\system32

@rem Installer layout and download sources.
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set INSTALL_ENV_DIR=%cd%\installer_files\env
set MICROMAMBA_DOWNLOAD_URL=https://github.com/mamba-org/micromamba-releases/releases/download/1.4.0-0/micromamba-win-64
set REPO_URL=https://github.com/oobabooga/text-generation-webui.git

@rem Flipped to T below once a working micromamba binary is detected.
set umamba_exists=F
|
|
|
|
@rem figure out whether git and conda need to be installed:
@rem a working micromamba binary implies a previous successful bootstrap
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >nul 2>&1
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T

@rem (if necessary) install git and conda into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
	@rem download micromamba when no working binary was found above
	if "%umamba_exists%" == "F" (
		echo "Downloading Micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"

		@rem guard mkdir: it errors when the directory already exists
		@rem (e.g. a previous run left the folder but no usable exe behind)
		if not exist "%MAMBA_ROOT_PREFIX%" mkdir "%MAMBA_ROOT_PREFIX%"
		call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe" || ( echo Micromamba failed to download. && goto end )

		@rem test the freshly downloaded binary before trusting it
		echo Micromamba version:
		call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version || ( echo Micromamba not found. && goto end )
	)

	@rem create the micromamba shell hook (needed for "activate" later)
	if not exist "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" (
		call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook >nul 2>&1
	)

	@rem create the installer env with the packages/channels chosen above
	if not exist "%INSTALL_ENV_DIR%" (
		echo Packages to install: %PACKAGES_TO_INSTALL%
		call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%" %CHANNEL% %PACKAGES_TO_INSTALL%
	)
)
|
|
|
|
@rem activate installer env; without it "python" below would be the system one
call "%MAMBA_ROOT_PREFIX%\condabin\micromamba.bat" activate "%INSTALL_ENV_DIR%" || ( echo MicroMamba hook not found. && goto end )

@rem clone the repository (or update an existing checkout) and install pip requirements
if exist text-generation-webui\ (
	@rem bail out if the cd fails, matching the fresh-clone branch below,
	@rem so pip never runs against the wrong working directory
	cd text-generation-webui || goto end
	git pull
) else (
	@rem reuse REPO_URL (defined above) instead of duplicating the literal URL
	git clone "%REPO_URL%"
	@rem bitsandbytes wheel compiled for Windows from modified source
	call python -m pip install https://github.com/jllllll/bitsandbytes-windows-webui/raw/main/bitsandbytes-0.37.2-py3-none-any.whl
	cd text-generation-webui || goto end
)
call python -m pip install -r requirements.txt --upgrade
call python -m pip install -r extensions\api\requirements.txt --upgrade
call python -m pip install -r extensions\elevenlabs_tts\requirements.txt --upgrade
call python -m pip install -r extensions\google_translate\requirements.txt --upgrade
call python -m pip install -r extensions\silero_tts\requirements.txt --upgrade
call python -m pip install -r extensions\whisper_stt\requirements.txt --upgrade
|
|
|
|
@rem skip gptq install if cpu only
@rem NOTE(review): no ":bandaid" label is visible in this copy of the script; if it
@rem was lost in transit, this goto aborts the script - verify against upstream.
if /I not "%gpuchoice%" == "A" goto bandaid

@rem download gptq and compile locally; if compile fails, install from wheel
if not exist repositories\ (
	mkdir repositories
)
cd repositories || goto end
if not exist GPTQ-for-LLaMa\ (
	git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git -b cuda
	cd GPTQ-for-LLaMa || goto end
	call python -m pip install -r requirements.txt
	call python setup_cuda.py install
	@rem setup_cuda.py may fail quietly; detect success by checking for the built egg
	if not exist "%INSTALL_ENV_DIR%\lib\site-packages\quant_cuda-0.0.0-py3.10-win-amd64.egg" (
		echo CUDA kernel compilation failed. Will try to install from wheel.
		call python -m pip install https://github.com/jllllll/GPTQ-for-LLaMa-Wheels/raw/main/quant_cuda-0.0.0-cp310-cp310-win_amd64.whl || ( echo Wheel installation failed. && goto end )
	)
)

:end
pause