Open WebUI | Ollama
Check: [openweb-ui](https://docs.openwebui.com/)
```yaml
services:
  openWebUI:
    image: ghcr.io/open-webui/open-webui:main
    restart: always
    ports:
      - "3000:8080"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./open-webui-local:/app/backend/data
    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
      - OLLAMA_BASE_URLS=http://ollama:11434
  ollama:
    image: ollama/ollama:0.5.4
    ports:
      - "11434:11434"
    volumes:
      - ./ollama-local:/root/.ollama
    environment:
      - OLLAMA_KEEP_ALIVE=24h
      - OLLAMA_HOST=0.0.0.0
```
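To try it, bring the stack up and pull a first model into the `ollama` container. A minimal sketch; `llama3.2` is just an example model name:

```bash
# Start both services in the background
docker compose up -d

# Pull a model through the ollama service (model name is an example)
docker compose exec ollama ollama pull llama3.2
```

Open WebUI is then reachable at http://localhost:3000 and talks to Ollama over the compose network via `OLLAMA_BASE_URLS`.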
- Using GPU
```yaml
services:
  openWebUI:
    image: ghcr.io/open-webui/open-webui:main
    restart: always
    ports:
      - "3000:8080"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./open-webui-local:/app/backend/data
    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
      - OLLAMA_BASE_URLS=http://ollama:11434
  ollama:
    image: ollama/ollama:0.5.4
    ports:
      - "11434:11434"
    volumes:
      - ./ollama-local:/root/.ollama
    environment:
      - OLLAMA_KEEP_ALIVE=24h
      - OLLAMA_HOST=0.0.0.0
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
```
- NVIDIA setup
```bash
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit

# Configure NVIDIA Container Toolkit
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker

# Test GPU integration
docker run --gpus all nvidia/cuda:11.5.2-base-ubuntu20.04 nvidia-smi
```
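With the toolkit in place, you can also check that the compose-level GPU reservation actually reaches the `ollama` container. A quick sketch, assuming the GPU stack above is running (the NVIDIA runtime normally injects `nvidia-smi` into the container):

```bash
# nvidia-smi inside the container is provided by the NVIDIA runtime
docker compose exec ollama nvidia-smi

# Ollama also logs its GPU detection at startup
docker compose logs ollama | grep -i gpu
```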
Techno Tim's stack
```yaml
services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - OLLAMA_KEEP_ALIVE=24h
      - ENABLE_IMAGE_GENERATION=True
      - COMFYUI_BASE_URL=http://stable-diffusion-webui:7860
    networks:
      - ai-stack
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./ollama:/root/.ollama
    ports:
      - "11434:11434" # Add this line to expose the port
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  open-webui:
    image: ghcr.io/open-webui/open-webui:latest
    container_name: open-webui
    restart: unless-stopped
    networks:
      - ai-stack
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - 'OLLAMA_BASE_URL=http://ollama:11434'
      - ENABLE_RAG_WEB_SEARCH=True
      - RAG_WEB_SEARCH_ENGINE=searxng
      - RAG_WEB_SEARCH_RESULT_COUNT=3
      - RAG_WEB_SEARCH_CONCURRENT_REQUESTS=10
      - SEARXNG_QUERY_URL=http://searxng:8080/search?q=<query>
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./open-webui:/app/backend/data
    depends_on:
      - ollama
    extra_hosts:
      - host.docker.internal:host-gateway
    ports:
      - "8080:8080" # Add this line to expose the port

  searxng:
    image: searxng/searxng:latest
    container_name: searxng
    networks:
      - ai-stack
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./searxng:/etc/searxng
    depends_on:
      - ollama
      - open-webui
    restart: unless-stopped
    ports:
      - "8081:8080" # Add this line to expose the port

  stable-diffusion-download:
    build: ./stable-diffusion-webui-docker/services/download/
    image: comfy-download
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./stable-diffusion-webui-docker/data:/data

  stable-diffusion-webui:
    build: ./stable-diffusion-webui-docker/services/comfy/
    image: comfy-ui
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - CLI_ARGS=
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./stable-diffusion-webui-docker/data:/data
      - ./stable-diffusion-webui-docker/output:/output
    stop_signal: SIGKILL
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']
              capabilities: [compute, utility]
    restart: unless-stopped
    networks:
      - ai-stack
    ports:
      - "7860:7860" # Add this line to expose the port

  mongo:
    image: mongo
    env_file:
      - .env
    networks:
      - ai-stack
    restart: unless-stopped
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./whisper/db_data:/data/db
      - ./whisper/db_data/logs/:/var/log/mongodb/
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - MONGO_INITDB_ROOT_USERNAME=${DB_USER:-whishper}
      - MONGO_INITDB_ROOT_PASSWORD=${DB_PASS:-whishper}
    command: ['--logpath', '/var/log/mongodb/mongod.log']
    ports:
      - "27017:27017" # Add this line to expose the port

  translate:
    container_name: whisper-libretranslate
    image: libretranslate/libretranslate:latest-cuda
    env_file:
      - .env
    networks:
      - ai-stack
    restart: unless-stopped
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./whisper/libretranslate/data:/home/libretranslate/.local/share
      - ./whisper/libretranslate/cache:/home/libretranslate/.local/cache
    user: root
    tty: true
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - LT_DISABLE_WEB_UI=True
      - LT_LOAD_ONLY=${LT_LOAD_ONLY:-en,fr,es}
      - LT_UPDATE_MODELS=True
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "5000:5000" # Add this line to expose the port

  whisper:
    container_name: whisper
    pull_policy: always
    image: pluja/whishper:latest-gpu
    env_file:
      - .env
    networks:
      - ai-stack
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./whisper/uploads:/app/uploads
      - ./whisper/logs:/var/log/whishper
      - ./whisper/models:/app/models
    restart: unless-stopped
    depends_on:
      - mongo
      - translate
    environment:
      - PUID=${PUID:-1000}
      - PGID=${PGID:-1000}
      - PUBLIC_INTERNAL_API_HOST=${WHISHPER_HOST}
      - PUBLIC_TRANSLATION_API_HOST=${WHISHPER_HOST}
      - PUBLIC_API_HOST=${WHISHPER_HOST:-}
      - PUBLIC_WHISHPER_PROFILE=gpu
      - WHISPER_MODELS_DIR=/app/models
      - UPLOAD_DIR=/app/uploads
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "8000:80" # Add this line to expose the port

networks:
  ai-stack:
    external: true
```
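The `ai-stack` network is declared `external: true`, so it has to exist before the first `docker compose up`. The services also read a `.env` file for the Whishper/Mongo variables referenced above. A minimal bootstrap sketch; every value below is a placeholder to adjust:

```bash
# The compose file expects this network to already exist
docker network create ai-stack

# Minimal .env sketch -- all values are placeholders
cat > .env <<'EOF'
PUID=1000
PGID=1000
DB_USER=whishper
DB_PASS=whishper
WHISHPER_HOST=http://localhost:8000
LT_LOAD_ONLY=en,fr,es
EOF

docker compose up -d
```

For the web search integration, `SEARXNG_QUERY_URL` hits SearXNG's API, which only answers if the JSON output format is enabled in `./searxng/settings.yml`. A minimal sketch, assuming SearXNG's defaults are otherwise kept:

```yaml
# ./searxng/settings.yml -- minimal sketch
use_default_settings: true
search:
  formats:
    - html
    - json
```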