From 2b79c99dd794963aa6e7279f3ec8e63e005a8057 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Sun, 13 Apr 2025 22:31:33 +0200
Subject: [PATCH] chore(README): reorganize docker compose files

Signed-off-by: Ettore Di Giacinto
---
 README.md                  | 10 +++---
 docker-compose.intel.yaml  | 42 ++++++++++++++++++++++
 docker-compose.nvidia.yaml | 31 ++++++++++++++++
 docker-compose.yaml        | 73 +++-----------------------------------
 4 files changed, 83 insertions(+), 73 deletions(-)
 create mode 100644 docker-compose.intel.yaml
 create mode 100644 docker-compose.nvidia.yaml

diff --git a/README.md b/README.md
index c380ce4..87fc32d 100644
--- a/README.md
+++ b/README.md
@@ -49,10 +49,10 @@ cd LocalAGI
 docker compose up
 
 # NVIDIA GPU setup
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU setup (for Intel Arc and integrated GPUs)
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 
 # Start with a specific model (see available models in models.localai.io, or localai.io to use any model in huggingface)
 MODEL_NAME=gemma-3-12b-it docker compose up
@@ -61,7 +61,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 ```
 
 Now you can access and manage your agents at [http://localhost:8080](http://localhost:8080)
@@ -149,13 +149,13 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU with custom models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=sd-1.5-ggml \
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 ```
 
 If no models are specified, it will use the defaults:
diff --git a/docker-compose.intel.yaml b/docker-compose.intel.yaml
new file mode 100644
index 0000000..d5c2b1a
--- /dev/null
+++ b/docker-compose.intel.yaml
@@ -0,0 +1,42 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    devices:
+      # On a system with integrated GPU and an Arc 770, this is the Arc 770
+      - /dev/dri/card1
+      - /dev/dri/renderD129
+    command:
+      - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-sd-1.5-ggml}
+      - granite-embedding-107m-multilingual
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
+    environment:
+      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
+      - LOCALAGI_LLM_API_URL=http://localai:8080
+      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
+      - LOCALAGI_STATE_DIR=/pool
+      - LOCALAGI_TIMEOUT=5m
+      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
diff --git a/docker-compose.nvidia.yaml b/docker-compose.nvidia.yaml
new file mode 100644
index 0000000..744dc2f
--- /dev/null
+++ b/docker-compose.nvidia.yaml
@@ -0,0 +1,31 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-cublas-cuda12-ffmpeg-core
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 2778c4e..25f1a8a 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,8 +7,9 @@ services:
     # Image list (dockerhub): https://hub.docker.com/r/localai/localai
     image: localai/localai:master-ffmpeg-core
     command:
-      # - gemma-3-12b-it
       - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-flux.1-dev}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
@@ -24,44 +25,6 @@ services:
       - ./volumes/models:/build/models:cached
       - ./volumes/images:/tmp/generated/images
 
-  localai-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localai
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities: [gpu]
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-flux.1-dev}
-      - granite-embedding-107m-multilingual
-
-  localai-intel:
-    profiles: ["intel"]
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    extends:
-      service: localai
-    image: localai/localai:master-sycl-f32-ffmpeg-core
-    devices:
-      # On a system with integrated GPU and an Arc 770, this is the Arc 770
-      - /dev/dri/card1
-      - /dev/dri/renderD129
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-sd-1.5-ggml}
-      - granite-embedding-107m-multilingual
-
   localrecall:
     image: quay.io/mudler/localrecall:main
     ports:
@@ -97,6 +60,8 @@ services:
     #image: quay.io/mudler/localagi:master
     environment:
       - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
       - LOCALAGI_LLM_API_URL=http://localai:8080
       #- LOCALAGI_LLM_API_KEY=sk-1234567890
       - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
@@ -106,32 +71,4 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
     volumes:
-      - ./volumes/localagi/:/pool
-
-  localagi-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
-
-  localagi-intel:
-    profiles: ["intel"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
+      - ./volumes/localagi/:/pool
\ No newline at end of file
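
A quick way to sanity-check the new layout (a sketch, not part of the diff): since each GPU overlay pulls the shared service definitions from docker-compose.yaml via `extends`, `docker compose config` can render the merged result before anything is started. The `/dev/dri` paths are machine-specific; the `card1`/`renderD129` pair hardcoded in docker-compose.intel.yaml matches the Arc 770 described in its comment, so listing the DRM nodes first is worthwhile.

```bash
# Render the fully merged NVIDIA configuration without starting containers;
# `extends` resolves the shared services from docker-compose.yaml.
docker compose -f docker-compose.nvidia.yaml config

# List the DRM nodes before relying on the card1/renderD129 defaults in
# docker-compose.intel.yaml; an integrated GPU typically sits on card0/renderD128.
ls -l /dev/dri/

# Start the Intel stack with custom models, mirroring the README examples.
MODEL_NAME=gemma-3-12b-it \
MULTIMODAL_MODEL=minicpm-v-2_6 \
IMAGE_MODEL=sd-1.5-ggml \
docker compose -f docker-compose.intel.yaml up
```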