chore(README): reorganize docker compose files

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date: 2025-04-13 22:31:33 +02:00
Parent: 77905ed3cd
Commit: 2b79c99dd7

4 changed files with 83 additions and 73 deletions

README.md

@@ -49,10 +49,10 @@ cd LocalAGI
 docker compose up
 
 # NVIDIA GPU setup
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU setup (for Intel Arc and integrated GPUs)
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 
 # Start with a specific model (see available models in models.localai.io, or localai.io to use any model in huggingface)
 MODEL_NAME=gemma-3-12b-it docker compose up
@@ -61,7 +61,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 ```
 
 Now you can access and manage your agents at [http://localhost:8080](http://localhost:8080)
@@ -149,13 +149,13 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU with custom models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=sd-1.5-ggml \
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 ```
 
 If no models are specified, it will use the defaults:
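With the profiles gone, each vendor file pulls its shared service definitions from `docker-compose.yaml` through Compose's `extends` keyword, so a single `-f` flag brings up the full stack with the vendor-specific overrides applied. A minimal sketch of the resulting invocations (the model name is the README's own example):

```
# CPU stack: the base file is picked up implicitly
docker compose up

# GPU stacks: point Compose at the vendor file; its services
# extend the ones defined in docker-compose.yaml
docker compose -f docker-compose.nvidia.yaml up
docker compose -f docker-compose.intel.yaml up

# Environment overrides work unchanged
MODEL_NAME=gemma-3-12b-it docker compose -f docker-compose.intel.yaml up
```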

docker-compose.intel.yaml (new file)

@@ -0,0 +1,42 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    devices:
+      # On a system with integrated GPU and an Arc 770, this is the Arc 770
+      - /dev/dri/card1
+      - /dev/dri/renderD129
+    command:
+      - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-sd-1.5-ggml}
+      - granite-embedding-107m-multilingual
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
+    environment:
+      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
+      - LOCALAGI_LLM_API_URL=http://localai:8080
+      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
+      - LOCALAGI_STATE_DIR=/pool
+      - LOCALAGI_TIMEOUT=5m
+      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
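The `/dev/dri` entries in this file are hard-coded for a machine where the Arc card is the second GPU, as the inline comment notes; on other systems the discrete GPU may appear as `card0`/`renderD128` instead. A quick check with standard Linux tooling (not part of this commit) before adjusting the `devices:` list:

```
# List the DRM device nodes that Docker will pass through
ls -l /dev/dri/
# The by-path symlinks show which PCI device owns each node
ls -l /dev/dri/by-path/
```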

docker-compose.nvidia.yaml (new file)

@@ -0,0 +1,31 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-cublas-cuda12-ffmpeg-core
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
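The `deploy.resources.reservations.devices` stanza only takes effect if the host has the NVIDIA Container Toolkit installed and wired into Docker. A common sanity check before bringing the stack up (the `ubuntu` image is just a convenient base; the toolkit injects `nvidia-smi` into the container):

```
# Should print the same GPU table as nvidia-smi on the host
docker run --rm --gpus all ubuntu nvidia-smi
```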

docker-compose.yaml

@@ -7,8 +7,9 @@ services:
     # Image list (dockerhub): https://hub.docker.com/r/localai/localai
     image: localai/localai:master-ffmpeg-core
     command:
-      # - gemma-3-12b-it
       - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-flux.1-dev}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
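The `${MODEL_NAME:-arcee-agent}` form is Compose's standard variable interpolation: the value of `MODEL_NAME` is used when it is set and non-empty, otherwise the default after `:-` applies. To see what the `command` resolves to without starting anything:

```
# Render the effective configuration with interpolation applied
MODEL_NAME=gemma-3-12b-it docker compose config
```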
@@ -24,44 +25,6 @@ services:
       - ./volumes/models:/build/models:cached
       - ./volumes/images:/tmp/generated/images
-  localai-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localai
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities: [gpu]
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-flux.1-dev}
-      - granite-embedding-107m-multilingual
-  localai-intel:
-    profiles: ["intel"]
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    extends:
-      service: localai
-    image: localai/localai:master-sycl-f32-ffmpeg-core
-    devices:
-      # On a system with integrated GPU and an Arc 770, this is the Arc 770
-      - /dev/dri/card1
-      - /dev/dri/renderD129
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-sd-1.5-ggml}
-      - granite-embedding-107m-multilingual
   localrecall:
     image: quay.io/mudler/localrecall:main
     ports:
@@ -97,6 +60,8 @@ services:
     #image: quay.io/mudler/localagi:master
     environment:
       - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
       - LOCALAGI_LLM_API_URL=http://localai:8080
       #- LOCALAGI_LLM_API_KEY=sk-1234567890
       - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
@@ -106,32 +71,4 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
     volumes:
       - ./volumes/localagi/:/pool
-  localagi-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
-  localagi-intel:
-    profiles: ["intel"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
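One behavioral note that falls out of this diff: the removed `localagi-nvidia` service defaulted `LOCALAGI_IMAGE_MODEL` to `flux.1-dev`, while the new `docker-compose.nvidia.yaml` adds no environment overrides to `localagi`, so it now inherits the base default of `sd-1.5-ggml`. Anyone relying on the old default can restore it explicitly:

```
# Restore the previous NVIDIA image-model default by hand
IMAGE_MODEL=flux.1-dev docker compose -f docker-compose.nvidia.yaml up
```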