Merge remote-tracking branch 'refs/remotes/origin/main'
commit 86c45b67ca
@@ -35,12 +35,12 @@ https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117
 https://github.com/jllllll/exllama/releases/download/0.0.10/exllama-0.0.10+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
 
 # llama-cpp-python without GPU support
-llama-cpp-python==0.1.81; platform_system != "Windows"
-https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.81/llama_cpp_python-0.1.81-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+llama-cpp-python==0.1.82; platform_system != "Windows"
+https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.82/llama_cpp_python-0.1.82-cp310-cp310-win_amd64.whl; platform_system == "Windows"
 
 # llama-cpp-python with CUDA support
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.81+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.81+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.82+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui/llama_cpp_python_cuda-0.1.82+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
 
 # llama-cpp-python with GGML support
 https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/cpu/llama_cpp_python_ggml-0.1.78+cpuavx2-cp310-cp310-win_amd64.whl; platform_system == "Windows"
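Each requirement above carries a PEP 508 environment marker after the semicolon, which is how one requirements file can pin the CPU, CUDA, and GGML wheels per platform. Below is a minimal sketch (not part of the commit) of how such markers are evaluated, using the packaging library that ships with pip; the marker strings are taken from the diff, while the dictionary keys are just illustrative labels.

from packaging.markers import Marker

# Marker strings copied from the requirements lines in the diff above.
requirement_markers = {
    "llama-cpp-python==0.1.82 (source/CPU pin)": 'platform_system != "Windows"',
    "llama_cpp_python-0.1.82 Windows wheel": 'platform_system == "Windows"',
    "llama_cpp_python_cuda-0.1.82 Linux wheel":
        'platform_system == "Linux" and platform_machine == "x86_64"',
}

for label, marker_text in requirement_markers.items():
    marker = Marker(marker_text)
    # evaluate() checks the marker against the current interpreter/platform;
    # passing an environment dict simulates another platform instead.
    print(f"{label}: applies here? {marker.evaluate()}")
    print(f"{label}: applies on Linux x86_64? "
          f"{marker.evaluate({'platform_system': 'Linux', 'platform_machine': 'x86_64'})}")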