Mirror of https://github.com/zebrajr/localGPT.git — synced 2025-12-06 00:20:19 +01:00
- Modified OllamaClient to read OLLAMA_HOST environment variable - Updated docker-compose.yml to pass OLLAMA_HOST to backend service - Changed docker.env to use Docker gateway IP (172.18.0.1:11434) - Configured Ollama service to bind to 0.0.0.0:11434 for container access - Added test script to verify Ollama connectivity from within container - All backend tests now pass including chat functionality Co-Authored-By: PromptEngineer <jnfarooq@outlook.com>
104 lines · 2.5 KiB · YAML
services:
# Ollama service for LLM inference (optional - can use host Ollama instead)
|
|
ollama:
|
|
image: ollama/ollama:latest
|
|
container_name: rag-ollama
|
|
ports:
|
|
- "11434:11434"
|
|
volumes:
|
|
- ollama_data:/root/.ollama
|
|
environment:
|
|
- OLLAMA_HOST=0.0.0.0
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
networks:
|
|
- rag-network
|
|
profiles:
|
|
- with-ollama # Optional service - enable with --profile with-ollama
|
|
|
|
# RAG API server
|
|
rag-api:
|
|
build:
|
|
context: .
|
|
dockerfile: Dockerfile.rag-api
|
|
container_name: rag-api
|
|
ports:
|
|
- "8001:8001"
|
|
environment:
|
|
# Use host Ollama by default, or containerized Ollama if enabled
|
|
- OLLAMA_HOST=${OLLAMA_HOST:-http://host.docker.internal:11434}
|
|
- NODE_ENV=production
|
|
volumes:
|
|
- ./lancedb:/app/lancedb
|
|
- ./index_store:/app/index_store
|
|
- ./shared_uploads:/app/shared_uploads
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:8001/models"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
networks:
|
|
- rag-network
|
|
|
|
# Backend API server
|
|
backend:
|
|
build:
|
|
context: .
|
|
dockerfile: Dockerfile.backend
|
|
container_name: rag-backend
|
|
ports:
|
|
- "8000:8000"
|
|
environment:
|
|
- NODE_ENV=production
|
|
- RAG_API_URL=http://rag-api:8001
|
|
- OLLAMA_HOST=${OLLAMA_HOST:-http://172.18.0.1:11434}
|
|
volumes:
|
|
- ./backend:/app/backend
|
|
- ./shared_uploads:/app/shared_uploads
|
|
depends_on:
|
|
rag-api:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
networks:
|
|
- rag-network
|
|
|
|
# Frontend Next.js application
|
|
frontend:
|
|
build:
|
|
context: .
|
|
dockerfile: Dockerfile.frontend
|
|
container_name: rag-frontend
|
|
ports:
|
|
- "3000:3000"
|
|
environment:
|
|
- NODE_ENV=production
|
|
- NEXT_PUBLIC_API_URL=http://localhost:8000
|
|
depends_on:
|
|
backend:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:3000"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
networks:
|
|
- rag-network
|
|
|
|
# Named volume persisting Ollama model data.
volumes:
  ollama_data:
    driver: local
|
# Shared bridge network joining all four services.
networks:
  rag-network:
    driver: bridge