chore: use qwen3 as default

Signed-off-by: mudler <mudler@localai.io>
commit 45969d3187 (parent a1efa07b24)
Author: mudler
Date: 2025-04-29 16:25:09 +02:00

3 changed files with 6 additions and 6 deletions

Makefile

@@ -11,7 +11,7 @@ cleanup-tests:
docker compose down
tests: prepare-tests
-LOCALAGI_MCPBOX_URL="http://localhost:9090" LOCALAGI_MODEL="gemma-3-12b-it-qat" LOCALAI_API_URL="http://localhost:8081" LOCALAGI_API_URL="http://localhost:8080" $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --fail-fast -v -r ./...
+LOCALAGI_MCPBOX_URL="http://localhost:9090" LOCALAGI_MODEL="qwen3-8b" LOCALAI_API_URL="http://localhost:8081" LOCALAGI_API_URL="http://localhost:8080" $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --fail-fast -v -r ./...
run-nokb:
$(MAKE) run KBDISABLEINDEX=true
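
Verifying this change locally is just the existing targets (a sketch; `tests` brings the compose stack up through its `prepare-tests` prerequisite, so no manual setup is needed):

```sh
# Runs prepare-tests first, then the ginkgo suite against the new
# default model pinned above (qwen3-8b).
make tests

# Tears the test stack down again (docker compose down).
make cleanup-tests
```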

README.md

@@ -120,7 +120,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profil
- Supports text, multimodal, and image generation models
- Run with: `docker compose -f docker-compose.nvidia.yaml up`
- Default models:
-- Text: `gemma-3-12b-it-qat`
+- Text: `qwen3-8b`
- Multimodal: `minicpm-v-2_6`
- Image: `sd-1.5-ggml`
- Environment variables:
@@ -136,7 +136,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profil
- Supports text, multimodal, and image generation models
- Run with: `docker compose -f docker-compose.intel.yaml up`
- Default models:
-- Text: `gemma-3-12b-it-qat`
+- Text: `qwen3-8b`
- Multimodal: `minicpm-v-2_6`
- Image: `sd-1.5-ggml`
- Environment variables:
@@ -167,7 +167,7 @@ docker compose -f docker-compose.intel.yaml up
```
If no models are specified, it will use the defaults:
-- Text model: `gemma-3-12b-it-qat`
+- Text model: `qwen3-8b`
- Multimodal model: `minicpm-v-2_6`
- Image model: `sd-1.5-ggml`
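
Since the compose file (next diff) reads these names from the environment via `${VAR:-default}`, the defaults can be swapped per run without editing any files. A minimal sketch, assuming the variable names used there:

```sh
# Swap only the text model; the other two keep their defaults.
MODEL_NAME=gemma-3-12b-it-qat docker compose up

# Or set all three explicitly.
MODEL_NAME=qwen3-8b MULTIMODAL_MODEL=minicpm-v-2_6 IMAGE_MODEL=sd-1.5-ggml \
  docker compose up
```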

docker-compose.yaml

@@ -7,7 +7,7 @@ services:
# Image list (dockerhub): https://hub.docker.com/r/localai/localai
image: localai/localai:master-ffmpeg-core
command:
-- ${MODEL_NAME:-gemma-3-12b-it-qat}
+- ${MODEL_NAME:-qwen3-8b}
- ${MULTIMODAL_MODEL:-minicpm-v-2_6}
- ${IMAGE_MODEL:-sd-1.5-ggml}
- granite-embedding-107m-multilingual
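
`${MODEL_NAME:-qwen3-8b}` is standard Compose variable substitution: the value comes from the caller's environment, falling back to `qwen3-8b` when the variable is unset or empty. A quick way to see what Compose will actually pass to LocalAI, without starting anything:

```sh
# Renders the compose file with all variables resolved; the command
# list should show qwen3-8b when MODEL_NAME is not exported.
docker compose config
```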
@@ -91,7 +91,7 @@ services:
- 8080:3000
#image: quay.io/mudler/localagi:master
environment:
-- LOCALAGI_MODEL=${MODEL_NAME:-gemma-3-12b-it-qat}
+- LOCALAGI_MODEL=${MODEL_NAME:-qwen3-8b}
- LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
- LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
- LOCALAGI_LLM_API_URL=http://localai:8080
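
Both services derive their model name from the same `MODEL_NAME` variable, so LocalAGI always requests the model LocalAI was started with. Once the stack is up, a quick sanity check that the new default actually loaded (this assumes LocalAI is published on host port 8081, as in the Makefile's test target):

```sh
# LocalAI exposes the OpenAI-compatible model listing.
curl http://localhost:8081/v1/models
```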