diff --git a/backend/ollama_client.py b/backend/ollama_client.py
index d0b59b2..33ee103 100644
--- a/backend/ollama_client.py
+++ b/backend/ollama_client.py
@@ -1,9 +1,12 @@
 import requests
 import json
-from typing import List, Dict
+import os
+from typing import List, Dict, Optional
 
 class OllamaClient:
-    def __init__(self, base_url: str = "http://localhost:11434"):
+    def __init__(self, base_url: Optional[str] = None):
+        if base_url is None:
+            base_url = os.getenv("OLLAMA_HOST", "http://localhost:11434")
         self.base_url = base_url
         self.api_url = f"{base_url}/api"
 
@@ -196,4 +199,4 @@ def main():
         print(f"AI: {response}")
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
\ No newline at end of file
diff --git a/backend/test_ollama_connectivity.py b/backend/test_ollama_connectivity.py
new file mode 100644
index 0000000..d4e2e65
--- /dev/null
+++ b/backend/test_ollama_connectivity.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+
+def test_ollama_connectivity():
+    """Test Ollama connectivity from within Docker container"""
+    print("🧪 Testing Ollama Connectivity")
+    print("=" * 40)
+
+    ollama_host = os.getenv('OLLAMA_HOST', 'Not set')
+    print(f"OLLAMA_HOST environment variable: {ollama_host}")
+
+    try:
+        from ollama_client import OllamaClient
+        client = OllamaClient()
+        print(f"OllamaClient base_url: {client.base_url}")
+
+        is_running = client.is_ollama_running()
+        print(f"Ollama running: {is_running}")
+
+        if is_running:
+            models = client.list_models()
+            print(f"Available models: {models}")
+            print("✅ Ollama connectivity test passed!")
+            return True
+        else:
+            print("❌ Ollama connectivity test failed!")
+            return False
+
+    except Exception as e:
+        print(f"❌ Error testing Ollama connectivity: {e}")
+        return False
+
+if __name__ == "__main__":
+    success = test_ollama_connectivity()
+    sys.exit(0 if success else 1)
diff --git a/docker-compose.yml b/docker-compose.yml
index c2e91b9..abf8b80 100644
--- a/docker-compose.yml
+++ 
b/docker-compose.yml
@@ -56,6 +56,7 @@ services:
     environment:
       - NODE_ENV=production
       - RAG_API_URL=http://rag-api:8001
+      - OLLAMA_HOST=${OLLAMA_HOST:-http://172.18.0.1:11434}
     volumes:
       - ./backend:/app/backend
       - ./shared_uploads:/app/shared_uploads
@@ -100,4 +101,4 @@ volumes:
 
 networks:
   rag-network:
-    driver: bridge
\ No newline at end of file
+    driver: bridge
\ No newline at end of file
diff --git a/docker.env b/docker.env
index c79ef77..4bb29ee 100644
--- a/docker.env
+++ b/docker.env
@@ -1,6 +1,7 @@
 # Docker environment configuration
 # Set this to use local Ollama instance running on host
-OLLAMA_HOST=http://host.docker.internal:11434
+# Note: Using Docker gateway IP instead of host.docker.internal for Linux compatibility
+OLLAMA_HOST=http://172.18.0.1:11434
 
 # Alternative: Use containerized Ollama (uncomment and run with --profile with-ollama)
 # OLLAMA_HOST=http://ollama:11434
@@ -8,4 +9,4 @@ OLLAMA_HOST=http://host.docker.internal:11434
 # Other configuration
 NODE_ENV=production
 NEXT_PUBLIC_API_URL=http://localhost:8000
-RAG_API_URL=http://rag-api:8001
\ No newline at end of file
+RAG_API_URL=http://rag-api:8001
\ No newline at end of file