From 4a7c30ea5a56aa66ab36165afefe51efc57c65b3 Mon Sep 17 00:00:00 2001 From: mudler Date: Thu, 17 Apr 2025 19:32:53 +0200 Subject: [PATCH] fix: correct image name, switch to flux.1-dev-ggml as default Signed-off-by: mudler --- README.md | 8 ++++---- docker-compose.nvidia.yaml | 4 +++- docker-compose.yaml | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0f639bf..a32f0e2 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up # NVIDIA GPU setup with custom multimodal and image models MODEL_NAME=gemma-3-12b-it \ MULTIMODAL_MODEL=minicpm-v-2_6 \ -IMAGE_MODEL=flux.1-dev \ +IMAGE_MODEL=flux.1-dev-ggml \ docker compose -f docker-compose.nvidia.yaml up ``` @@ -116,7 +116,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profil - Default models: - Text: `arcee-agent` - Multimodal: `minicpm-v-2_6` - - Image: `flux.1-dev` + - Image: `flux.1-dev-ggml` - Environment variables: - `MODEL_NAME`: Text model to use - `MULTIMODAL_MODEL`: Multimodal model to use @@ -150,7 +150,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up # NVIDIA GPU with custom models MODEL_NAME=gemma-3-12b-it \ MULTIMODAL_MODEL=minicpm-v-2_6 \ -IMAGE_MODEL=flux.1-dev \ +IMAGE_MODEL=flux.1-dev-ggml \ docker compose -f docker-compose.nvidia.yaml up # Intel GPU with custom models @@ -163,7 +163,7 @@ docker compose -f docker-compose.intel.yaml up If no models are specified, it will use the defaults: - Text model: `arcee-agent` - Multimodal model: `minicpm-v-2_6` -- Image model: `flux.1-dev` (NVIDIA) or `sd-1.5-ggml` (Intel) +- Image model: `flux.1-dev-ggml` (NVIDIA) or `sd-1.5-ggml` (Intel) Good (relatively small) models that have been tested are: diff --git a/docker-compose.nvidia.yaml b/docker-compose.nvidia.yaml index 744dc2f..ff250a8 100644 --- a/docker-compose.nvidia.yaml +++ b/docker-compose.nvidia.yaml @@ -6,7 +6,9 @@ services: environment: - LOCALAI_SINGLE_ACTIVE_BACKEND=true - DEBUG=true 
- image: localai/localai:master-sycl-f32-ffmpeg-core + image: localai/localai:master-cublas-cuda12-ffmpeg-core + # For images with python backends, use: + # image: localai/localai:master-cublas-cuda12-ffmpeg deploy: resources: reservations: diff --git a/docker-compose.yaml b/docker-compose.yaml index 25f1a8a..ee5bc98 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -9,7 +9,7 @@ services: command: - ${MODEL_NAME:-arcee-agent} - ${MULTIMODAL_MODEL:-minicpm-v-2_6} - - ${IMAGE_MODEL:-flux.1-dev} + - ${IMAGE_MODEL:-flux.1-dev-ggml} - granite-embedding-107m-multilingual healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]