# Environment configuration (.env style) — consumed by the container runtime / docker-compose.
# Dotenv semantics: one KEY=value per line, full-line # comments, duplicate keys resolve
# last-wins. This file previously held duplicate keys; each key now appears exactly once
# with its effective value.

LOG_LEVEL=debug

# --- Models to use ---
MODEL_NAME=gpt-alex
#MULTIMODAL_MODEL=minicpm-v-2_6
#IMAGE_MODEL=sd-1.5-ggml
#EMBEDDING_MODEL=granite-embedding-107m-multilingual

# --- Speech-to-text engine ---
# FIX: the original contained the garbled line "STT_ENGINE=STT_ENGINE=whisper" plus a
# second assignment; only the last value ever took effect. One assignment kept.
# Basic engine alternative: STT_ENGINE=whisper
# Fast Whisper (GPU recommended):
STT_ENGINE=whisper-alex-fast
CUDA_VISIBLE_DEVICES=0
GGML_CUDA_FORCE_MMQ=0
GGML_CUDA_FORCE_CUBLAS=1

# --- Home Assistant configuration ---
# SECURITY: long-lived access token committed in plain text. Rotate this token and load
# it from a secrets store / untracked local env file instead of keeping it here.
HASS_HOST=https://jarvis.carriere.cloud
HASS_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjYjYzMTQwZjc4Njk0ZTdhODFiYTY2OGI4YzM1NWQzMSIsImlhdCI6MTc0OTM4ODkzMCwiZXhwIjoyMDY0NzQ4OTMwfQ.y6zC6fOk_d7COngm4QG-WatC8lQCYfltuvrJSDbZtk8
# NOTE(review): HASS_HOST is https:// but the websocket URL below is unencrypted ws://.
# Confirm whether wss://jarvis.carriere.cloud/api/websocket is intended.
HASS_SOCKET_URL=ws://jarvis.carriere.cloud/api/websocket

# --- Server configuration ---
PORT=3000
NODE_ENV=production

# --- Service URLs ---
LOCALAGI_LLM_API_URL=http://localai:8080
LOCALAGI_LOCALRAG_URL=http://localrecall:8080

# --- General LocalAGI configuration ---
LOCALAGI_TIMEOUT=5m
LOCALAGI_MCP_TIMEOUT=5m
LOCALAGI_STATE_DIR=/pool
LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false

# --- LocalAI configuration (based on the Unraid instance) ---
# FIX: DEBUG was assigned twice (false under "Server Configuration", then true here);
# last-wins made the effective value true. A single explicit assignment kept.
DEBUG=true
MODELS_PATH=/models
THREADS=4
COQUI_TOS_AGREED=1
GALLERIES=[{"name":"localai","url":"github:mudler/LocalAI/gallery/index.yaml@master"}]
SINGLE_ACTIVE_BACKEND=false
LOCALAI_SINGLE_ACTIVE_BACKEND=false
PYTHON_GRPC_MAX_WORKERS=12
LLAMACPP_PARALLEL=6
PARALLEL_REQUESTS=true
WATCHDOG_IDLE=true
WATCHDOG_BUSY=true
# Idle/busy watchdog timeouts (duration strings)
WATCHDOG_IDLE_TIMEOUT=60m
WATCHDOG_BUSY_TIMEOUT=5m
# Upload limit (presumably MiB — TODO confirm against LocalAI docs)
LOCALAI_UPLOAD_LIMIT=256
DISABLE_AUTODETECT=true
LOW_VRAM=true
MMAP=true
CONTEXT_SIZE=32768
LOCALAI_P2P=true
LOCALAI_FEDERATED=true
LOCALAI_P2P_LOGLEVEL=info