accelerate==0.33.*
bitsandbytes==0.44.*
colorama
datasets
einops
fastapi==0.112.4
gradio==4.26.*
jinja2==3.1.4
lm_eval==0.3.0
markdown
numba==0.59.*
numpy==1.26.*
optimum==1.17.*
pandas
peft==0.12.*
Pillow>=9.5.0
psutil
pydantic==2.8.2
pyyaml
requests
rich
safetensors==0.4.*
scipy
sentencepiece
tensorboard
transformers==4.45.*
tqdm
wandb

# API
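# Presumably the extra dependencies for the OpenAI-compatible API extension
# (speech input, Cloudflare tunnelling, server-sent events, token counting).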
SpeechRecognition==3.10.0
flask_cloudflared==0.0.14
sse-starlette==1.6.5
tiktoken

# llama-cpp-python (CPU only, AVX2)
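# Note: each wheel URL below ends with a PEP 508 environment-marker expression after the
# semicolon (platform_system, platform_machine, python_version). pip installs a wheel only
# when its marker matches the current environment; lines whose markers do not match
# (e.g. macOS, or Python versions other than 3.10/3.11) are simply ignored.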
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.3.0+cpuavx2-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.3.0+cpuavx2-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.3.0+cpuavx2-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python-0.3.0+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"

# llama-cpp-python (CUDA, no tensor cores)
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.3.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.3.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.3.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.3.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

# llama-cpp-python (CUDA, tensor cores)
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.3.0+cu121-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.3.0+cu121-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.3.0+cu121-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
https://github.com/oobabooga/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda_tensorcores-0.3.0+cu121-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"

# CUDA wheels
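# Prebuilt exllamav2 0.2.2 and flash-attn 2.6.3 wheels compiled against torch 2.4.x and
# CUDA 12.x, again selected per platform and Python version via the markers; the
# py3-none-any exllamav2 wheel appears to be a fallback for non-x86_64 Linux machines.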
https://github.com/oobabooga/exllamav2/releases/download/v0.2.2/exllamav2-0.2.2+cu121.torch2.4.1-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/exllamav2/releases/download/v0.2.2/exllamav2-0.2.2+cu121.torch2.4.1-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
https://github.com/oobabooga/exllamav2/releases/download/v0.2.2/exllamav2-0.2.2+cu121.torch2.4.1-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
https://github.com/oobabooga/exllamav2/releases/download/v0.2.2/exllamav2-0.2.2+cu121.torch2.4.1-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"
https://github.com/oobabooga/exllamav2/releases/download/v0.2.2/exllamav2-0.2.2-py3-none-any.whl; platform_system == "Linux" and platform_machine != "x86_64"
https://github.com/oobabooga/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu122torch2.4.1cxx11abiFALSE-cp311-cp311-win_amd64.whl; platform_system == "Windows" and python_version == "3.11"
https://github.com/oobabooga/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu122torch2.4.1cxx11abiFALSE-cp310-cp310-win_amd64.whl; platform_system == "Windows" and python_version == "3.10"
https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiFALSE-cp311-cp311-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.11"
https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64" and python_version == "3.10"