Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-23 00:18:20 +01:00)

Commit 992affefef. The diff below bumps PyTorch to 2.2.1, replaces the single-purpose update scripts with an update wizard, and folds the OpenAI API extension's requirements into the main requirements files.
@@ -66,13 +66,17 @@
     "    print(f\"TORCH: {torver}\")\n",
     "    is_cuda118 = '+cu118' in torver # 2.1.0+cu118\n",
     "\n",
+    "    if is_cuda118:\n",
+    "        !python -m pip install --upgrade torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118\n",
+    "    else:\n",
+    "        !python -m pip install --upgrade torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121\n",
+    "\n",
     "    textgen_requirements = open('requirements.txt').read().splitlines()\n",
     "    if is_cuda118:\n",
     "        textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]\n",
     "    with open('temp_requirements.txt', 'w') as file:\n",
     "        file.write('\\n'.join(textgen_requirements))\n",
     "\n",
-    "    !pip install -r extensions/openai/requirements.txt --upgrade\n",
     "    !pip install -r temp_requirements.txt --upgrade\n",
     "\n",
     "    print(\"\\033[1;32;1m\\n --> If you see a warning about \\\"previously imported packages\\\", just ignore it.\\033[0;37;0m\")\n",
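The notebook cell above is hard to read through the JSON escaping; unescaped, the added logic amounts to the following sketch (`torver` holds the Colab environment's `torch.__version__`, and the `!pip` shell lines are shown as comments):

```python
# Plain-Python sketch of the notebook hunk above (illustrative value for torver).
torver = "2.2.1+cu121"           # in the notebook: torch.__version__
is_cuda118 = '+cu118' in torver  # the local version suffix encodes the CUDA flavor

# The two added !pip lines pin torch 2.2.1 against the matching wheel index:
#   cu118 -> https://download.pytorch.org/whl/cu118
#   else  -> https://download.pytorch.org/whl/cu121

textgen_requirements = open('requirements.txt').read().splitlines()
if is_cuda118:
    # Retarget CUDA 12.x wheel URLs in the requirements at CUDA 11.8 builds
    textgen_requirements = [req.replace('+cu121', '+cu118').replace('+cu122', '+cu118') for req in textgen_requirements]

with open('temp_requirements.txt', 'w') as file:
    file.write('\n'.join(textgen_requirements))
```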
README.md (21 changed lines)
@@ -32,7 +32,7 @@ To restart the web UI in the future, just run the `start_` script again. This sc

 The script accepts command-line flags. Alternatively, you can edit the `CMD_FLAGS.txt` file with a text editor and add your flags there.

-To get updates in the future, run `update_linux.sh`, `update_windows.bat`, `update_macos.sh`, or `update_wsl.bat`.
+To get updates in the future, run `update_wizard_linux.sh`, `update_wizard_windows.bat`, `update_wizard_macos.sh`, or `update_wizard_wsl.bat`.

 <details>
 <summary>
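For context on the `CMD_FLAGS.txt` mechanism mentioned above: one_click.py reads the file at startup and appends its contents to whatever flags were passed on the command line (see the `cmd_flags_path` handling in the one_click.py hunks further down). A condensed sketch, with the comment and line-continuation handling of the real implementation omitted:

```python
# Condensed sketch of how CMD_FLAGS.txt is merged with CLI flags
# (one_click.py does this at module level; comment lines and trailing
# backslashes are handled in the real version).
import os
import sys

cmd_flags_path = os.path.join(os.getcwd(), "CMD_FLAGS.txt")
CMD_FLAGS = ''
if os.path.exists(cmd_flags_path):
    with open(cmd_flags_path, 'r') as f:
        CMD_FLAGS = ' '.join(line.strip() for line in f if line.strip())

flags = f"{' '.join(flag for flag in sys.argv[1:] if flag != '--update-wizard')} {CMD_FLAGS}"
```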
@@ -45,9 +45,10 @@ The script uses Miniconda to set up a Conda environment in the `installer_files`

 If you ever need to install something manually in the `installer_files` environment, you can launch an interactive shell using the cmd script: `cmd_linux.sh`, `cmd_windows.bat`, `cmd_macos.sh`, or `cmd_wsl.bat`.

-* There is no need to run any of those scripts (`start_`, `update_`, or `cmd_`) as admin/root.
+* There is no need to run any of those scripts (`start_`, `update_wizard_`, or `cmd_`) as admin/root.
+* To install the requirements for extensions, you can use the `extensions_reqs` script for your OS. At the end, this script will install the main requirements for the project to make sure that they take precedence in case of version conflicts.
 * For additional instructions about AMD and WSL setup, consult [the documentation](https://github.com/oobabooga/text-generation-webui/wiki).
-* For automated installation, you can use the `GPU_CHOICE`, `USE_CUDA118`, `LAUNCH_AFTER_INSTALL`, and `INSTALL_EXTENSIONS` environment variables. For instance: `GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=FALSE ./start_linux.sh`.
+* For automated installation, you can use the `GPU_CHOICE`, `USE_CUDA118`, and `LAUNCH_AFTER_INSTALL` environment variables. For instance: `GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE ./start_linux.sh`.

 ### Manual installation using Conda

@@ -75,12 +76,12 @@ conda activate textgen

 | System | GPU | Command |
 |--------|---------|---------|
-| Linux/WSL | NVIDIA | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* --index-url https://download.pytorch.org/whl/cu121` |
-| Linux/WSL | CPU only | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* --index-url https://download.pytorch.org/whl/cpu` |
-| Linux | AMD | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* --index-url https://download.pytorch.org/whl/rocm5.6` |
-| MacOS + MPS | Any | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.*` |
-| Windows | NVIDIA | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* --index-url https://download.pytorch.org/whl/cu121` |
-| Windows | CPU only | `pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.*` |
+| Linux/WSL | NVIDIA | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121` |
+| Linux/WSL | CPU only | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu` |
+| Linux | AMD | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/rocm5.6` |
+| MacOS + MPS | Any | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1` |
+| Windows | NVIDIA | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121` |
+| Windows | CPU only | `pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1` |

 The up-to-date commands can be found here: https://pytorch.org/get-started/locally/.

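Whichever row of the table applies, a quick sanity check with standard PyTorch attributes confirms that the intended build variant was installed:

```python
# Verify the installed PyTorch build (standard attributes, nothing project-specific).
import torch

print(torch.__version__)          # e.g. '2.2.1+cu121' — the suffix encodes the build
print(torch.version.cuda)         # CUDA toolkit version, or None for CPU/MPS builds
print(torch.cuda.is_available())  # True if a usable NVIDIA GPU is visible
```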
@@ -145,7 +146,7 @@ Then browse to
 1) For Kepler GPUs and older, you will need to install CUDA 11.8 instead of 12:

 ```
-pip3 install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* --index-url https://download.pytorch.org/whl/cu118
+pip3 install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118
 conda install -y -c "nvidia/label/cuda-11.8.0" cuda-runtime
 ```

@@ -13,7 +13,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
 WORKDIR /home/app/
 RUN git clone https://github.com/oobabooga/text-generation-webui.git
 WORKDIR /home/app/text-generation-webui
-RUN GPU_CHOICE=B USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=TRUE ./start_linux.sh --verbose
+RUN GPU_CHOICE=B USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE ./start_linux.sh --verbose
 COPY CMD_FLAGS.txt /home/app/text-generation-webui/
 EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
 WORKDIR /home/app/text-generation-webui
@@ -17,7 +17,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
 WORKDIR /home/app/
 RUN git clone https://github.com/oobabooga/text-generation-webui.git
 WORKDIR /home/app/text-generation-webui
-RUN GPU_CHOICE=N USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=TRUE ./start_linux.sh --verbose
+RUN GPU_CHOICE=N USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE ./start_linux.sh --verbose
 COPY CMD_FLAGS.txt /home/app/text-generation-webui/
 EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
 # set umask to ensure group read / write at runtime
@@ -13,7 +13,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
 WORKDIR /home/app/
 RUN git clone https://github.com/oobabooga/text-generation-webui.git
 WORKDIR /home/app/text-generation-webui
-RUN GPU_CHOICE=D USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=TRUE ./start_linux.sh --verbose
+RUN GPU_CHOICE=D USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE ./start_linux.sh --verbose
 COPY CMD_FLAGS.txt /home/app/text-generation-webui/
 EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
 # set umask to ensure group read / write at runtime
@@ -13,7 +13,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
 WORKDIR /home/app/
 RUN git clone https://github.com/oobabooga/text-generation-webui.git
 WORKDIR /home/app/text-generation-webui
-RUN GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=TRUE ./start_linux.sh --verbose
+RUN GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE ./start_linux.sh --verbose
 COPY CMD_FLAGS.txt /home/app/text-generation-webui/
 EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}
 WORKDIR /home/app/text-generation-webui
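The four Dockerfile hunks above differ only in the `GPU_CHOICE` letter. The letters correspond to the installer menu in one_click.py; only the "A" entry of that mapping is visible in the hunks below, so treat the rest of this reproduction as a sketch (the values match the `selected_gpu` comparisons that do appear in the diff):

```python
# GPU_CHOICE letters as consumed by the installer (sketch).
gpu_choice_to_name = {
    "A": "NVIDIA",
    "B": "AMD",
    "C": "APPLE",
    "D": "INTEL",
    "N": "NONE",
}
```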
@@ -7,12 +7,6 @@ The main API for this project is meant to be a drop-in replacement to the OpenAI
 * It doesn't connect to OpenAI.
 * It doesn't use the openai-python library.

-If you did not use the one-click installers, you may need to install the requirements first:
-
-```
-pip install -r extensions/openai/requirements.txt
-```
-
 ### Starting the API

 Add `--api` to your command-line flags.
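Since the API is a drop-in replacement for the OpenAI API, a client can talk to it with plain HTTP once the server is started with `--api`. A minimal sketch, assuming the default API port 5000 (the Dockerfiles above expose it as `CONTAINER_API_PORT`) and the third-party `requests` library:

```python
# Minimal request against the OpenAI-compatible endpoint (sketch; the endpoint
# path follows the OpenAI convention, and port 5000 is the project default).
import requests

response = requests.post(
    "http://127.0.0.1:5000/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 64,
    },
)
print(response.json()["choices"][0]["message"]["content"])
```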
@@ -1,4 +0,0 @@
-SpeechRecognition==3.10.0
-flask_cloudflared==0.0.14
-sse-starlette==1.6.5
-tiktoken
@@ -36,7 +36,7 @@ def load_extensions():
         try:
             extension = importlib.import_module(f"extensions.{name}.script")
         except ModuleNotFoundError:
-            logger.error(f"Could not import the requirements for '{name}'. Make sure to install the requirements for the extension.\n\nLinux / Mac:\n\npip install -r extensions/{name}/requirements.txt --upgrade\n\nWindows:\n\npip install -r extensions\\{name}\\requirements.txt --upgrade\n\nIf you used the one-click installer, paste the command above in the terminal window opened after launching the cmd script for your OS.")
+            logger.error(f"Could not import the requirements for '{name}'. Make sure to install the requirements for the extension.\n\n* To install requirements for all available extensions, launch the\n  update_wizard script for your OS and choose the B option.\n\n* To install the requirements for this extension alone, launch the\n  cmd script for your OS and paste the following command in the\n  terminal window that appears:\n\nLinux / Mac:\n\npip install -r extensions/{name}/requirements.txt --upgrade\n\nWindows:\n\npip install -r extensions\\{name}\\requirements.txt --upgrade\n")
             raise

     # Only run setup() and apply settings from settings.yaml once
one_click.py (266 changed lines)
@@ -9,14 +9,21 @@ import site
 import subprocess
 import sys

-script_dir = os.getcwd()
-conda_env_path = os.path.join(script_dir, "installer_files", "env")
-
 # Remove the '# ' from the following lines as needed for your AMD GPU on Linux
 # os.environ["ROCM_PATH"] = '/opt/rocm'
 # os.environ["HSA_OVERRIDE_GFX_VERSION"] = '10.3.0'
 # os.environ["HCC_AMDGPU_TARGET"] = 'gfx1030'

+
+# Define the required PyTorch version
+TORCH_VERSION = "2.2.1"
+TORCHVISION_VERSION = "0.17.1"
+TORCHAUDIO_VERSION = "2.2.1"
+
+# Environment
+script_dir = os.getcwd()
+conda_env_path = os.path.join(script_dir, "installer_files", "env")
+
 # Command-line flags
 cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt")
 if os.path.exists(cmd_flags_path):
@@ -25,7 +32,7 @@ if os.path.exists(cmd_flags_path):
 else:
     CMD_FLAGS = ''

-flags = f"{' '.join([flag for flag in sys.argv[1:] if flag != '--update'])} {CMD_FLAGS}"
+flags = f"{' '.join([flag for flag in sys.argv[1:] if flag != '--update-wizard'])} {CMD_FLAGS}"


 def signal_handler(sig, frame):
@@ -86,13 +93,42 @@ def torch_version():

     if site_packages_path:
         torch_version_file = open(os.path.join(site_packages_path, 'torch', 'version.py')).read().splitlines()
-        torver = [line for line in torch_version_file if '__version__' in line][0].split('__version__ = ')[1].strip("'")
+        torver = [line for line in torch_version_file if line.startswith('__version__')][0].split('__version__ = ')[1].strip("'")
     else:
         from torch import __version__ as torver

     return torver


+def update_pytorch():
+    print_big_message("Checking for PyTorch updates")
+
+    torver = torch_version()
+    is_cuda = '+cu' in torver
+    is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
+    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
+    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
+    is_cpu = '+cpu' in torver  # 2.0.1+cpu
+
+    install_pytorch = f"python -m pip install --upgrade torch=={TORCH_VERSION} torchvision=={TORCHVISION_VERSION} torchaudio=={TORCHAUDIO_VERSION} "
+
+    if is_cuda118:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
+    elif is_cuda:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
+    elif is_rocm:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
+    elif is_cpu:
+        install_pytorch += "--index-url https://download.pytorch.org/whl/cpu"
+    elif is_intel:
+        if is_linux():
+            install_pytorch = "python -m pip install --upgrade torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+        else:
+            install_pytorch = "python -m pip install --upgrade torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+
+    run_cmd(f"{install_pytorch}", assert_success=True, environment=True)
+
+
 def is_installed():
     site_packages_path = None
     for sitedir in site.getsitepackages():
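The one-line change in `torch_version()` swaps a substring test for `startswith()`, so only the actual assignment in `torch/version.py` can match rather than any line that merely contains `__version__`. An illustration with made-up file contents:

```python
# Illustration of the torch_version() parsing above. These file contents are
# invented, but torch/version.py does contain an `__version__ = '...'` assignment.
version_py = [
    "__version__ = '2.2.1+cu121'",
    "debug = False",
    "cuda = '12.1'",
]
torver = [line for line in version_py if line.startswith('__version__')][0].split('__version__ = ')[1].strip("'")
print(torver)  # -> 2.2.1+cu121
```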
@@ -129,8 +165,7 @@ def print_big_message(message):
     lines = message.split('\n')
     print("\n\n*******************************************************************")
     for line in lines:
-        if line.strip() != '':
-            print("*", line)
+        print("*", line)

     print("*******************************************************************\n\n")

@@ -165,26 +200,51 @@ def run_cmd(cmd, assert_success=False, environment=False, capture_output=False,
     return result


+def generate_alphabetic_sequence(index):
+    result = ''
+    while index >= 0:
+        index, remainder = divmod(index, 26)
+        result = chr(ord('A') + remainder) + result
+        index -= 1
+
+    return result
+
+
+def get_user_choice(question, options_dict):
+    print()
+    print(question)
+    print()
+
+    for key, value in options_dict.items():
+        print(f"{key}) {value}")
+
+    print()
+
+    choice = input("Input> ").upper()
+    while choice not in options_dict.keys():
+        print("Invalid choice. Please try again.")
+        choice = input("Input> ").upper()
+
+    return choice
+
+
 def install_webui():
-    # Select your GPU, or choose to run in CPU mode
+    # Ask the user for the GPU vendor
     if "GPU_CHOICE" in os.environ:
         choice = os.environ["GPU_CHOICE"].upper()
         print_big_message(f"Selected GPU choice \"{choice}\" based on the GPU_CHOICE environment variable.")
     else:
-        print()
-        print("What is your GPU?")
-        print()
-        print("A) NVIDIA")
-        print("B) AMD (Linux/MacOS only. Requires ROCm SDK 5.6 on Linux)")
-        print("C) Apple M Series")
-        print("D) Intel Arc (IPEX)")
-        print("N) None (I want to run models in CPU mode)")
-        print()
-        choice = input("Input> ").upper()
-        while choice not in 'ABCDN':
-            print("Invalid choice. Please try again.")
-            choice = input("Input> ").upper()
+        choice = get_user_choice(
+            "What is your GPU?",
+            {
+                'A': 'NVIDIA',
+                'B': 'AMD (Linux/MacOS only. Requires ROCm SDK 5.6 on Linux)',
+                'C': 'Apple M Series',
+                'D': 'Intel Arc (IPEX)',
+                'N': 'None (I want to run models in CPU mode)'
+            },
+        )

     gpu_choice_to_name = {
         "A": "NVIDIA",
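`generate_alphabetic_sequence` turns a 0-based index into spreadsheet-style labels; the update wizard later uses it to letter the per-extension menu entries. Expected behavior:

```python
# Expected outputs of generate_alphabetic_sequence (0-based index -> label):
for i in (0, 1, 25, 26, 27):
    print(i, generate_alphabetic_sequence(i))
# 0 A
# 1 B
# 25 Z
# 26 AA
# 27 AB
```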
@@ -195,24 +255,21 @@ def install_webui():
     }

     selected_gpu = gpu_choice_to_name[choice]
+    use_cuda118 = "N"

+    # Write a flag to CMD_FLAGS.txt for CPU mode
     if selected_gpu == "NONE":
         with open(cmd_flags_path, 'r+') as cmd_flags_file:
             if "--cpu" not in cmd_flags_file.read():
                 print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
-                cmd_flags_file.write("\n--cpu")
+                cmd_flags_file.write("\n--cpu\n")

-    # Find the proper Pytorch installation command
-    install_git = "conda install -y -k ninja git"
-    install_pytorch = "python -m pip install torch==2.1.* torchvision==0.16.* torchaudio==2.1.* "
-
-    use_cuda118 = "N"
-
-    if any((is_windows(), is_linux())) and selected_gpu == "NVIDIA":
+    # Check if the user wants CUDA 11.8
+    elif any((is_windows(), is_linux())) and selected_gpu == "NVIDIA":
         if "USE_CUDA118" in os.environ:
             use_cuda118 = "Y" if os.environ.get("USE_CUDA118", "").lower() in ("yes", "y", "true", "1", "t", "on") else "N"
         else:
-            print("\nDo you want to use CUDA 11.8 instead of 12.1? Only choose this option if your GPU is very old (Kepler or older).\nFor RTX and GTX series GPUs, say \"N\". If unsure, say \"N\".\n")
+            print("\nDo you want to use CUDA 11.8 instead of 12.1?\nOnly choose this option if your GPU is very old (Kepler or older).\n\nFor RTX and GTX series GPUs, say \"N\".\nIf unsure, say \"N\".\n")
             use_cuda118 = input("Input (Y/N)> ").upper().strip('"\'').strip()
             while use_cuda118 not in 'YN':
                 print("Invalid choice. Please try again.")
@@ -220,29 +277,35 @@ def install_webui():

         if use_cuda118 == 'Y':
             print("CUDA: 11.8")
-            install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
         else:
             print("CUDA: 12.1")
-            install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
-    elif not is_macos() and selected_gpu == "AMD":
-        if is_linux():
-            install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
-        else:
-            print("AMD GPUs are only supported on Linux. Exiting...")
-            sys.exit(1)
-    elif is_linux() and selected_gpu in ["APPLE", "NONE"]:
+
+    # No PyTorch for AMD on Windows (?)
+    elif is_windows() and selected_gpu == "AMD":
+        print("PyTorch setup on Windows is not implemented yet. Exiting...")
+        sys.exit(1)
+
+    # Find the Pytorch installation command
+    install_pytorch = f"python -m pip install torch=={TORCH_VERSION} torchvision=={TORCHVISION_VERSION} torchaudio=={TORCHAUDIO_VERSION} "
+
+    if selected_gpu == "NVIDIA":
+        if use_cuda118 == 'Y':
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu118"
+        else:
+            install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
+    elif selected_gpu == "AMD":
+        install_pytorch += "--index-url https://download.pytorch.org/whl/rocm5.6"
+    elif selected_gpu in ["APPLE", "NONE"]:
         install_pytorch += "--index-url https://download.pytorch.org/whl/cpu"
     elif selected_gpu == "INTEL":
-        install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+        if is_linux():
+            install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+        else:
+            install_pytorch = "python -m pip install torch==2.1.0a0 torchvision==0.16.0a0 torchaudio==2.1.0a0 intel-extension-for-pytorch==2.1.10 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"

     # Install Git and then Pytorch
     print_big_message("Installing PyTorch.")
-    run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)
+    run_cmd(f"conda install -y -k ninja git && {install_pytorch} && python -m pip install py-cpuinfo==9.0.0", assert_success=True, environment=True)

-    # Install CUDA libraries (this wasn't necessary for Pytorch before...)
-    if selected_gpu == "NVIDIA":
-        print_big_message("Installing the CUDA runtime libraries.")
-        run_cmd(f"conda install -y -c \"nvidia/label/{'cuda-12.1.1' if use_cuda118 == 'N' else 'cuda-11.8.0'}\" cuda-runtime", assert_success=True, environment=True)
-
     if selected_gpu == "INTEL":
         # Install oneAPI dependencies via conda
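To make the restructured branch concrete: with `selected_gpu == "NVIDIA"` and `use_cuda118 == 'N'`, the code above assembles the following command (values taken from the constants added earlier in this diff):

```python
# Worked example of the command assembly above for NVIDIA + CUDA 12.1.
TORCH_VERSION, TORCHVISION_VERSION, TORCHAUDIO_VERSION = "2.2.1", "0.17.1", "2.2.1"

install_pytorch = f"python -m pip install torch=={TORCH_VERSION} torchvision=={TORCHVISION_VERSION} torchaudio=={TORCHAUDIO_VERSION} "
install_pytorch += "--index-url https://download.pytorch.org/whl/cu121"
print(install_pytorch)
# python -m pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121
```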
@@ -255,47 +318,49 @@ def install_webui():
     update_requirements(initial_installation=True)


-def update_requirements(initial_installation=False):
+def get_extensions_names():
+    return [foldername for foldername in os.listdir('extensions') if os.path.isfile(os.path.join('extensions', foldername, 'requirements.txt'))]
+
+
+def install_extensions_requirements():
+    print_big_message("Installing extensions requirements.\nSome of these may fail on Windows.\nDon't worry if you see error messages, as they will not affect the main program.")
+    extensions = get_extensions_names()
+    for i, extension in enumerate(extensions):
+        print(f"\n\n--- [{i+1}/{len(extensions)}]: {extension}\n\n")
+        extension_req_path = os.path.join("extensions", extension, "requirements.txt")
+        run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
+
+
+def update_requirements(initial_installation=False, pull=True):
     # Create .git directory if missing
     if not os.path.exists(os.path.join(script_dir, ".git")):
         git_creation_cmd = 'git init -b main && git remote add origin https://github.com/oobabooga/text-generation-webui && git fetch && git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main && git reset --hard origin/main && git branch --set-upstream-to=origin/main'
         run_cmd(git_creation_cmd, environment=True, assert_success=True)

-    files_to_check = [
-        'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat',
-        'update_linux.sh', 'update_macos.sh', 'update_windows.bat', 'update_wsl.bat',
-        'one_click.py'
-    ]
+    if pull:
+        print_big_message("Updating the local copy of the repository with \"git pull\"")

-    before_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
-    run_cmd("git pull --autostash", assert_success=True, environment=True)
-    after_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
+        files_to_check = [
+            'start_linux.sh', 'start_macos.sh', 'start_windows.bat', 'start_wsl.bat',
+            'update_linux.sh', 'update_macos.sh', 'update_windows.bat', 'update_wsl.bat',
+            'one_click.py'
+        ]

-    # Check for differences in installation file hashes
-    for file_name in files_to_check:
-        if before_pull_hashes[file_name] != after_pull_hashes[file_name]:
-            print_big_message(f"File '{file_name}' was updated during 'git pull'. Please run the script again.")
-            exit(1)
+        before_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}
+        run_cmd("git pull --autostash", assert_success=True, environment=True)
+        after_pull_hashes = {file_name: calculate_file_hash(file_name) for file_name in files_to_check}

-    # Extensions requirements are installed only during the initial install by default.
-    # That can be changed with the INSTALL_EXTENSIONS environment variable.
-    install = initial_installation
-    if "INSTALL_EXTENSIONS" in os.environ:
-        install = os.environ["INSTALL_EXTENSIONS"].lower() in ("yes", "y", "true", "1", "t", "on")
+        # Check for differences in installation file hashes
+        for file_name in files_to_check:
+            if before_pull_hashes[file_name] != after_pull_hashes[file_name]:
+                print_big_message(f"File '{file_name}' was updated during 'git pull'. Please run the script again.")
+                exit(1)

-    if install:
-        print_big_message("Installing extensions requirements.")
-        skip = ['superbooga', 'superboogav2', 'coqui_tts']  # Fail to install on Windows
-        extensions = [foldername for foldername in os.listdir('extensions') if os.path.isfile(os.path.join('extensions', foldername, 'requirements.txt'))]
-        extensions = [x for x in extensions if x not in skip]
-        for i, extension in enumerate(extensions):
-            print(f"\n\n--- [{i+1}/{len(extensions)}]: {extension}\n\n")
-            extension_req_path = os.path.join("extensions", extension, "requirements.txt")
-            run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
-    elif initial_installation:
-        print_big_message("Will not install extensions due to INSTALL_EXTENSIONS environment variable.")
+    # Update PyTorch
+    if not initial_installation:
+        update_pytorch()

-    # Detect the Python and PyTorch versions
+    # Detect the PyTorch version
     torver = torch_version()
     is_cuda = '+cu' in torver
     is_cuda118 = '+cu118' in torver  # 2.1.0+cu118
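The new `if pull:` block guards against one_click.py updating itself mid-run: it hashes the launcher files, pulls, re-hashes, and asks for a restart if anything changed. `calculate_file_hash` is defined elsewhere in one_click.py; a sketch of what such a helper looks like:

```python
# Sketch of a file-hashing helper like the calculate_file_hash used above
# (illustrative reimplementation, not the project's exact code).
import hashlib
import os

def calculate_file_hash(file_path):
    if not os.path.isfile(file_path):
        return ''
    with open(file_path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()
```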
@@ -335,11 +400,6 @@ def update_requirements(initial_installation=False, pull=True):
         run_cmd(f"python -m pip uninstall -y {package_name}", environment=True)
         print(f"Uninstalled {package_name}")

-    # Make sure that API requirements are installed (temporary)
-    extension_req_path = os.path.join("extensions", "openai", "requirements.txt")
-    if os.path.exists(extension_req_path):
-        run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", environment=True)
-
     # Install/update the project requirements
     run_cmd("python -m pip install -r temp_requirements.txt --upgrade", assert_success=True, environment=True)
     os.remove('temp_requirements.txt')
@@ -364,19 +424,49 @@ if __name__ == "__main__":
     check_env()

     parser = argparse.ArgumentParser(add_help=False)
-    parser.add_argument('--update', action='store_true', help='Update the web UI.')
+    parser.add_argument('--update-wizard', action='store_true', help='Launch a menu with update options.')
     args, _ = parser.parse_known_args()

-    if args.update:
-        update_requirements()
+    if args.update_wizard:
+        while True:
+            choice = get_user_choice(
+                "What would you like to do?",
+                {
+                    'A': 'Update the web UI',
+                    'B': 'Install/update extensions requirements',
+                    'C': 'Revert local changes to repository files with \"git reset --hard\"',
+                    'N': 'Nothing (exit)'
+                },
+            )
+
+            if choice == 'A':
+                update_requirements()
+            elif choice == 'B':
+                choices = {'A': 'All extensions'}
+                for i, name in enumerate(get_extensions_names()):
+                    key = generate_alphabetic_sequence(i + 1)
+                    choices[key] = name
+
+                choice = get_user_choice("What extension?", choices)
+
+                if choice == 'A':
+                    install_extensions_requirements()
+                else:
+                    extension_req_path = os.path.join("extensions", choices[choice], "requirements.txt")
+                    run_cmd(f"python -m pip install -r {extension_req_path} --upgrade", assert_success=False, environment=True)
+
+                update_requirements(pull=False)
+            elif choice == 'C':
+                run_cmd("git reset --hard", assert_success=True, environment=True)
+            elif choice == 'N':
+                sys.exit()
     else:
-        # If webui has already been installed, skip and run
         if not is_installed():
             install_webui()
             os.chdir(script_dir)

         if os.environ.get("LAUNCH_AFTER_INSTALL", "").lower() in ("no", "n", "false", "0", "f", "off"):
-            print_big_message("Install finished successfully and will now exit due to LAUNCH_AFTER_INSTALL.")
+            print_big_message("Will now exit due to LAUNCH_AFTER_INSTALL.")
             sys.exit()

         # Check if a model has been downloaded yet
@@ -388,7 +478,7 @@ if __name__ == "__main__":
         model_dir = 'models'

         if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
-            print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")
+            print_big_message("You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")

         # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist
         conda_path_bin = os.path.join(conda_env_path, "bin")
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,42 +23,48 @@ transformers==4.38.*
 tqdm
 wandb

+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # bitsandbytes
 bitsandbytes==0.42.*; platform_system != "Windows"
-https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+https://github.com/oobabooga/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.42.0-py3-none-win_amd64.whl; platform_system == "Windows"

 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # llama-cpp-python (CUDA, no tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # llama-cpp-python (CUDA, tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

 # CUDA wheels
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
-https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
+https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
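Every wheel URL above is gated by pip environment markers, so a single requirements file can serve several OS/Python combinations and pip installs only the matching lines. Pip's vendored `packaging` library evaluates the markers like this:

```python
# Evaluating the environment markers pip uses to pick a wheel line.
from packaging.markers import Marker

marker = Marker('platform_system == "Windows" and python_version == "3.11"')
print(marker.evaluate())  # True only on Windows running Python 3.11
```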
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,19 +23,25 @@ transformers==4.38.*
 tqdm
 wandb

+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

 # AMD wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.52+rocm5.6.1-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.52+rocm5.6.1-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.55+rocm5.6.1-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/rocm/llama_cpp_python_cuda-0.2.55+rocm5.6.1-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,17 +23,23 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 
 # AMD wheels
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1-py3-none-any.whl; platform_system != "Darwin" and platform_machine != "x86_64"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+rocm5.6-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
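Note on the requirement lines above: everything after the semicolon is a PEP 508 environment marker, which pip evaluates against the running interpreter so that only the wheel matching the current OS, architecture, and Python version is installed. A minimal sketch of that evaluation, assuming the `packaging` library (the implementation pip vendors); the two requirement strings below are illustrative placeholders, not lines from the files above:

# Minimal sketch: evaluate PEP 508 environment markers the way pip does.
# Requires the `packaging` library; the requirement strings are examples.
from packaging.markers import Marker

lines = [
    'example-package==0.2.55; platform_system == "Linux" and python_version == "3.11"',
    'example-package==0.2.55; platform_system == "Windows" and python_version == "3.10"',
]

for line in lines:
    requirement, _, marker = line.partition(";")
    # Marker.evaluate() substitutes platform_system, platform_machine,
    # python_version, etc. from the current environment.
    if Marker(marker.strip()).evaluate():
        print("applies here:", requirement.strip())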
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,11 +23,17 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # Mac wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_11_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_12_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_14_0_x86_64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,13 +23,19 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # Mac wheels
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_11_0_arm64.whl; platform_system == "Darwin" and platform_release >= "20.0.0" and platform_release < "21.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_12_0_arm64.whl; platform_system == "Darwin" and platform_release >= "21.0.0" and platform_release < "22.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_13_0_arm64.whl; platform_system == "Darwin" and platform_release >= "22.0.0" and platform_release < "23.0.0" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp311-cp311-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.52-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/metal/llama_cpp_python-0.2.55-cp310-cp310-macosx_14_0_arm64.whl; platform_system == "Darwin" and platform_release >= "23.0.0" and platform_release < "24.0.0" and python_version == "3.10"
 https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,8 +23,14 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # llama-cpp-python (CPU only, AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,8 +23,14 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -23,42 +23,48 @@ transformers==4.38.*
 tqdm
 wandb
 
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
+
 # bitsandbytes
 bitsandbytes==0.42.*; platform_system != "Windows"
-https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.1-py3-none-win_amd64.whl; platform_system == "Windows"
+https://github.com/oobabooga/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.42.0-py3-none-win_amd64.whl; platform_system == "Windows"
 
 # llama-cpp-python (CPU only, no AVX2)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.52+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.2.55+cpuavx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 
 # llama-cpp-python (CUDA, no tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.52+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.2.55+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 
 # llama-cpp-python (CUDA, tensor cores)
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121avx-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121avx-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.52+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.2.55+cu121avx-cp310-cp310-manylinux_2_31_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 
 # CUDA wheels
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
 https://github.com/jllllll/AutoGPTQ/releases/download/v0.6.0/auto_gptq-0.6.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
-https://github.com/oobabooga/exllamav2/releases/download/v0.0.14/exllamav2-0.0.14-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
+https://github.com/oobabooga/exllamav2/releases/download/v0.0.14.1/exllamav2-0.0.14.1-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
-https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
+https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
-https://github.com/jllllll/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu121torch2.1cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
+https://github.com/oobabooga/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2.0cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.3.4/flash_attn-2.3.4+cu122torch2.1cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.6/flash_attn-2.5.6+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
 https://github.com/jllllll/GPTQ-for-LLaMa-CUDA/releases/download/0.1.1/gptq_for_llama-0.1.1+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
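Besides the marker after the semicolon, pip must also match the wheel filename's own compatibility tags (Python implementation, ABI, and platform, e.g. cp311-cp311-manylinux_2_31_x86_64) against the running interpreter. A small sketch of decoding those tags, again assuming the `packaging` library; the filename is one of the wheels bumped above:

# Sketch: decode the compatibility tags embedded in a wheel filename.
from packaging.utils import parse_wheel_filename

name, version, build, tags = parse_wheel_filename(
    "llama_cpp_python_cuda-0.2.55+cu121avx-cp311-cp311-manylinux_2_31_x86_64.whl"
)
print(name, version)               # llama-cpp-python-cuda 0.2.55+cu121avx
print([str(tag) for tag in tags])  # ['cp311-cp311-manylinux_2_31_x86_64']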
@@ -3,7 +3,7 @@ colorama
 datasets
 einops
 gradio==3.50.*
-hqq==0.1.3.post1
+hqq==0.1.5
 jinja2==3.1.2
 lm_eval==0.3.0
 markdown
@@ -22,3 +22,9 @@ tensorboard
 transformers==4.38.*
 tqdm
 wandb
+
+# API
+SpeechRecognition==3.10.0
+flask_cloudflared==0.0.14
+sse-starlette==1.6.5
+tiktoken
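The same pins recur across every requirements variant above (hqq==0.1.5, llama-cpp-python 0.2.55 wheels, exllamav2 0.0.14.1 wheels), so the variants have to be bumped in lockstep. A hypothetical audit script, not part of the repository, that lists the `==` pins in each variant so drift between the files is easy to spot:

# Hypothetical helper (not in the repo): report version pins per variant.
import pathlib
import re

# Matches "name==version" pins at the start of a line; URL-based wheel
# lines are skipped because they do not begin with a bare package name.
PIN = re.compile(r"^([A-Za-z][A-Za-z0-9_.-]*)==([^;\s]+)", re.MULTILINE)

for path in sorted(pathlib.Path(".").glob("requirements*.txt")):
    print(path.name)
    for name, version in PIN.findall(path.read_text()):
        print(f"  {name}=={version}")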
@@ -39,6 +39,9 @@ if [ "$conda_exists" == "F" ]; then
     # test the conda binary
     echo "Miniconda version:"
     "$CONDA_ROOT_PREFIX/bin/conda" --version
+
+    # delete the Miniconda installer
+    rm "$INSTALL_DIR/miniconda_installer.sh"
 fi
 
 # create the installer env
@@ -39,6 +39,9 @@ if [ "$conda_exists" == "F" ]; then
     # test the conda binary
     echo "Miniconda version:"
     "$CONDA_ROOT_PREFIX/bin/conda" --version
+
+    # delete the Miniconda installer
+    rm "$INSTALL_DIR/miniconda_installer.sh"
 fi
 
 # create the installer env
@@ -37,7 +37,7 @@ if "%conda_exists%" == "F" (
     echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
 
     mkdir "%INSTALL_DIR%"
-    call curl -L "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
+    call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
 
     echo Installing Miniconda to %CONDA_ROOT_PREFIX%
     start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
@@ -45,6 +45,9 @@ if "%conda_exists%" == "F" (
     @rem test the conda binary
     echo Miniconda version:
     call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
+
+    @rem delete the Miniconda installer
+    del "%INSTALL_DIR%\miniconda_installer.exe"
 )
 
 @rem create the installer env
@@ -23,4 +23,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a
 conda activate "$INSTALL_ENV_DIR"
 
 # update installer env
-python one_click.py --update && echo -e "\nDone!"
+python one_click.py --update-wizard && echo -e "\nDone!"
@@ -23,4 +23,4 @@ source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains a
 conda activate "$INSTALL_ENV_DIR"
 
 # update installer env
-python one_click.py --update && echo -e "\nDone!"
+python one_click.py --update-wizard && echo -e "\nDone!"
@@ -28,7 +28,7 @@ set "CUDA_HOME=%CUDA_PATH%"
 call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
 
 @rem update installer env
-call python one_click.py --update && (
+call python one_click.py --update-wizard && (
     echo.
     echo Done!
 )
@@ -5,7 +5,7 @@ cd /D "%~dp0"
 set PATH=%PATH%;%SystemRoot%\system32
 
 @rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script calling wsl.sh with 'update' will run updater
-call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh update"
+call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh update-wizard"
 
 :end
 pause

wsl.sh
@@ -66,6 +66,9 @@ if [ "$conda_exists" == "F" ]; then
    # test the conda binary
    echo "Miniconda version:"
    "$CONDA_ROOT_PREFIX/bin/conda" --version
+
+   # delete the Miniconda installer
+   rm "$INSTALL_DIR/miniconda_installer.sh"
 fi
 
 # create the installer env
@@ -107,6 +110,6 @@ fi
 
 # setup installer env update env if called with 'wsl.sh update'
 case "$1" in
-("update") python one_click.py --update;;
+("update-wizard") python one_click.py --update-wizard;;
 (*) python one_click.py $@;;
 esac
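All of the update scripts above now invoke one_click.py with --update-wizard instead of --update. The diff does not show how one_click.py consumes the flag; below is a hypothetical argparse sketch of that dispatch, where only the two flag names are taken from the scripts above and everything else is assumed:

# Hypothetical sketch of the flag handling in one_click.py; the real
# implementation is not part of this diff.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
                    help="update the web UI directly (old entry point)")
parser.add_argument("--update-wizard", action="store_true",
                    help="launch the interactive update wizard (new entry point)")
args = parser.parse_args()

if args.update_wizard:
    print("Starting the update wizard...")
elif args.update:
    print("Updating the web UI...")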