# localGPT/docker-compose.yml

services:
  # Ollama service for LLM inference (optional - can use host Ollama instead)
  ollama:
    image: ollama/ollama:latest
    container_name: rag-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - rag-network
    profiles:
      - with-ollama  # Optional service - enable with --profile with-ollama
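
  # The ollama service above is optional and off by default. One way to enable it
  # (an assumed invocation, not verified against the project docs) is to start the
  # stack with its profile and point rag-api at the containerized instance by its
  # compose service name:
  #
  #   OLLAMA_HOST=http://ollama:11434 docker compose --profile with-ollama up -d
  #
  # On the shared rag-network bridge network, "ollama" resolves to the rag-ollama container.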

  # RAG API server
  rag-api:
    build:
      context: .
      dockerfile: Dockerfile.rag-api
    container_name: rag-api
    ports:
      - "8001:8001"
    environment:
      # Use host Ollama by default, or containerized Ollama if enabled
      - OLLAMA_HOST=${OLLAMA_HOST:-http://host.docker.internal:11434}
      - NODE_ENV=production
    volumes:
      - ./lancedb:/app/lancedb
      - ./index_store:/app/index_store
      - ./shared_uploads:/app/shared_uploads
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8001/models"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - rag-network
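
  # Note (not in the original file): host.docker.internal resolves automatically on
  # Docker Desktop, but not on a native Linux engine. If the default OLLAMA_HOST above
  # is kept on Linux, an alias to the host gateway may be needed; a commented-out
  # sketch to place under the rag-api service keys:
  #
  #   extra_hosts:
  #     - "host.docker.internal:host-gateway"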

  # Backend API server
  backend:
    build:
      context: .
      dockerfile: Dockerfile.backend
    container_name: rag-backend
    ports:
      - "8000:8000"
    environment:
      - NODE_ENV=production
      - RAG_API_URL=http://rag-api:8001
    volumes:
      - ./backend/chat_data.db:/app/backend/chat_data.db
      - ./shared_uploads:/app/shared_uploads
    depends_on:
      rag-api:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - rag-network
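
  # Caveat (an added note, not from the original file): the chat_data.db entry is a
  # single-file bind mount. If ./backend/chat_data.db is missing on the host at first
  # start-up, Docker creates a directory with that name instead, which the backend
  # cannot open as a database file; creating an empty file beforehand (for example
  # with "touch backend/chat_data.db") avoids this.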

  # Frontend Next.js application
  frontend:
    build:
      context: .
      dockerfile: Dockerfile.frontend
    container_name: rag-frontend
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - NEXT_PUBLIC_API_URL=http://localhost:8000
    depends_on:
      backend:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - rag-network
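
  # Note (an assumption about the frontend build; Dockerfile.frontend is not shown here):
  # NEXT_PUBLIC_API_URL is read by the browser, so it points at the backend's
  # host-published port (http://localhost:8000) rather than the internal service name
  # http://backend:8000. Next.js normally inlines NEXT_PUBLIC_* values at build time,
  # so the same value may also need to be supplied as a build argument.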

volumes:
  ollama_data:
    driver: local

networks:
  rag-network:
    driver: bridge
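
# Typical bring-up (assumed workflow, not taken from the project docs):
#   docker compose up -d --build                          # rag-api, backend, frontend; uses host Ollama
#   docker compose --profile with-ollama up -d --build    # also starts the bundled Ollama container
# Once the health checks pass, the UI is at http://localhost:3000 and the backend API at http://localhost:8000.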