Compare commits

2 Commits

mcp-migrat ... feat/bette

| Author | SHA1 | Date |
|---|---|---|
|  | 5c466d9b81 |  |
|  | 4a7c30ea5a |  |
@@ -60,7 +60,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 # NVIDIA GPU setup with custom multimodal and image models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
-IMAGE_MODEL=flux.1-dev \
+IMAGE_MODEL=flux.1-dev-ggml \
 docker compose -f docker-compose.nvidia.yaml up
 ```

@@ -116,7 +116,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profiles
 - Default models:
   - Text: `arcee-agent`
   - Multimodal: `minicpm-v-2_6`
-  - Image: `flux.1-dev`
+  - Image: `sd-1.5-ggml`
 - Environment variables:
   - `MODEL_NAME`: Text model to use
   - `MULTIMODAL_MODEL`: Multimodal model to use

@@ -150,7 +150,7 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 # NVIDIA GPU with custom models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
-IMAGE_MODEL=flux.1-dev \
+IMAGE_MODEL=flux.1-dev-ggml \
 docker compose -f docker-compose.nvidia.yaml up

 # Intel GPU with custom models

@@ -163,7 +163,7 @@ docker compose -f docker-compose.intel.yaml up
 If no models are specified, it will use the defaults:
 - Text model: `arcee-agent`
 - Multimodal model: `minicpm-v-2_6`
-- Image model: `flux.1-dev` (NVIDIA) or `sd-1.5-ggml` (Intel)
+- Image model: `sd-1.5-ggml`

 Good (relatively small) models that have been tested are:
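The defaults listed above are wired through Docker Compose variable substitution in the compose files: each model is written as `${VAR:-default}`, so the fallback is used whenever the variable is unset. A minimal sketch of that pattern (the service name and the exact entries here are illustrative, not the repository's literal definitions):

```yaml
services:
  local-ai:
    # `${MODEL_NAME:-arcee-agent}` resolves to MODEL_NAME from the shell or an
    # .env file if it is set, otherwise to the literal default after `:-`.
    command:
      - ${MODEL_NAME:-arcee-agent}
      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
      - ${IMAGE_MODEL:-sd-1.5-ggml}
```

Running something like `MODEL_NAME=gemma-3-12b-it docker compose up` therefore swaps only the text model and keeps the remaining defaults.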

@@ -11,11 +11,6 @@ services:
       # On a system with integrated GPU and an Arc 770, this is the Arc 770
       - /dev/dri/card1
       - /dev/dri/renderD129
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-sd-1.5-ggml}
-      - granite-embedding-107m-multilingual

   localrecall:
     extends:
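For context on the `/dev/dri` lines in this hunk: Compose passes host GPU device nodes straight into the container via a `devices:` list. A minimal sketch, assuming the Intel service follows the usual pattern (which `cardN`/`renderDN` pair is correct depends on the host, as the Arc 770 comment notes):

```yaml
services:
  local-ai:
    devices:
      # Host DRI nodes exposed to the container; adjust the numbers to match
      # the GPU you want LocalAI to use (check `ls /dev/dri` on the host).
      - /dev/dri/card1
      - /dev/dri/renderD129
```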

@@ -6,7 +6,9 @@ services:
     environment:
       - LOCALAI_SINGLE_ACTIVE_BACKEND=true
       - DEBUG=true
-    image: localai/localai:master-sycl-f32-ffmpeg-core
+    image: localai/localai:master-cublas-cuda12-ffmpeg-core
+    # For images with python backends, use:
+    # image: localai/localai:master-cublas-cuda12-ffmpeg
     deploy:
       resources:
         reservations:
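The truncated `deploy: resources: reservations:` block above is where Compose requests the NVIDIA GPU for this service. A sketch of the usual shape of that block (the `count` and `capabilities` values shown are assumptions, not necessarily the file's exact settings):

```yaml
services:
  local-ai:
    image: localai/localai:master-cublas-cuda12-ffmpeg-core
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU via the nvidia-container-toolkit runtime.
            - driver: nvidia
              count: 1
              capabilities: [gpu]
```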

@@ -9,7 +9,7 @@ services:
     command:
       - ${MODEL_NAME:-arcee-agent}
       - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-flux.1-dev}
+      - ${IMAGE_MODEL:-sd-1.5-ggml}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
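The `/readyz` probe above is only the `test` line of the service healthcheck; a full block usually also sets timing. A sketch with illustrative values (the interval, timeout, retries, and start_period here are assumptions, not the repository's actual settings):

```yaml
services:
  local-ai:
    healthcheck:
      # Poll LocalAI's readiness endpoint until it reports healthy.
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
```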