version: "3.3" services: text-generation-webui: build: context: . args: # Requirements file to use: # | GPU | requirements file to use | # |--------|---------|---------| # | NVIDIA | `requirements.txt` | # | AMD | `requirements_amd.txt` | # | CPU only | `requirements_cpu_only.txt` | # | Apple Intel | `requirements_apple_intel.txt` | # | Apple Silicon | `requirements_apple_silicon.txt` | # Default: requirements.txt` # BUILD_REQUIREMENTS: requirements.txt # Extension requirements to build: # BUILD_EXTENSIONS: # specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5} BUILD_EXTENSIONS: ${BUILD_EXTENSIONS:-} APP_GID: ${APP_GID:-6972} APP_UID: ${APP_UID-6972} env_file: .env user: "${APP_RUNTIME_UID:-6972}:${APP_RUNTIME_GID:-6972}" ports: - "${HOST_PORT:-7860}:${CONTAINER_PORT:-7860}" - "${HOST_API_PORT:-5000}:${CONTAINER_API_PORT:-5000}" stdin_open: true tty: true volumes: - ./cache:/home/app/text-generation-webui/cache - ./characters:/home/app/text-generation-webui/characters - ./extensions:/home/app/text-generation-webui/extensions - ./loras:/home/app/text-generation-webui/loras - ./logs:/home/app/text-generation-webui/logs - ./models:/home/app/text-generation-webui/models - ./presets:/home/app/text-generation-webui/presets - ./prompts:/home/app/text-generation-webui/prompts - ./softprompts:/home/app/text-generation-webui/softprompts - ./training:/home/app/text-generation-webui/training - ./cloudflared:/etc/cloudflared