mirror of
https://github.com/zebrajr/privateGPT.git
synced 2025-12-06 00:19:52 +01:00
* Extract optional dependencies
* Separate local mode into llms-llama-cpp and embeddings-huggingface for clarity
* Support Ollama embeddings
* Upgrade to llamaindex 0.10.14; remove legacy use of ServiceContext in ContextChatEngine
* Fix vector retriever filters
23 lines · 313 B · YAML
# PrivateGPT settings profile: run both the LLM and embeddings against a
# local Ollama server, with Qdrant as the vector store.

server:
  # Environment name; overridable via the APP_ENV environment variable.
  env_name: ${APP_ENV:ollama}

llm:
  mode: ollama
  # Maximum number of tokens the model may generate per response.
  max_new_tokens: 512
  # Prompt context window size, in tokens.
  context_window: 3900

embedding:
  mode: ollama

ollama:
  llm_model: mistral
  embedding_model: nomic-embed-text
  # Default Ollama API endpoint on the local machine.
  api_base: http://localhost:11434

vectorstore:
  database: qdrant

qdrant:
  # On-disk location for the local Qdrant collection.
  path: local_data/private_gpt/qdrant