chore(gpu-example): update with multimodal model and image-gen model

Signed-off-by: mudler <mudler@localai.io>
Author: mudler
Date: 2025-04-04 11:54:20 +02:00
Parent: 6747fe87f2
Commit: 25286a828c

@@ -8,11 +8,16 @@ services:
     image: localai/localai:master-gpu-nvidia-cuda-12
     command:
     - mlabonne_gemma-3-27b-it-abliterated
-    # - qwen_qwq-32b
     # Other good alternative options:
     # - rombo-org_rombo-llm-v3.0-qwen-32b # minimum suggested model
+    # - qwen_qwq-32b
     # - arcee-agent
     - granite-embedding-107m-multilingual
+    - flux.1-dev
+    - minicpm-v-2_6
+    environment:
+      # Enable if you have a single GPU that can't fit all the models
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
       interval: 10s
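With LOCALAI_SINGLE_ACTIVE_BACKEND=true, LocalAI keeps only one backend loaded at a time and swaps models in and out of VRAM on demand, which is what lets the new image-generation and multimodal models share a single GPU with the chat model. A quick way to exercise the flux.1-dev entry is a request against LocalAI's OpenAI-compatible image endpoint; a minimal sketch, assuming the service is reachable on port 8080 as in the healthcheck above (prompt and size are placeholder values):

    # generate a test image with the flux.1-dev model added in this commit
    curl http://localhost:8080/v1/images/generations \
      -H "Content-Type: application/json" \
      -d '{"model": "flux.1-dev", "prompt": "a cat wearing sunglasses", "size": "512x512"}'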
@@ -73,6 +78,8 @@ services:
       - LOCALAGENT_STATE_DIR=/pool
       - LOCALAGENT_TIMEOUT=5m
       - LOCALAGENT_ENABLE_CONVERSATIONS_LOGGING=false
+      - LOCALAGENT_MULTIMODAL_MODEL=minicpm-v-2_6
+      - LOCALAGENT_IMAGE_MODEL=flux.1-dev
     extra_hosts:
       - "host.docker.internal:host-gateway"
     volumes:
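The two new LOCALAGENT_* variables point the agent at the models added above: minicpm-v-2_6 for vision input and flux.1-dev for image generation. To verify the multimodal model responds on the LocalAI side, a minimal sketch using the OpenAI-style vision message format, assuming port 8080 and a publicly reachable image URL (both placeholders):

    # send a text + image message to the minicpm-v-2_6 multimodal model
    curl http://localhost:8080/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{
        "model": "minicpm-v-2_6",
        "messages": [{
          "role": "user",
          "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}}
          ]
        }]
      }'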