services:
  api:
    # See https://localai.io/basics/getting_started/#container-images for
    # a list of available container images (or build your own with the provided Dockerfile)
    # Available images with CUDA, ROCm, SYCL, Vulkan
    # Image list (quay.io): https://quay.io/repository/go-skynet/local-ai?tab=tags
    # Image list (dockerhub): https://hub.docker.com/r/localai/localai
    image: localai/localai:latest-cpu
    command:
      - marco-o1
      - bert-embeddings
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 10s
      timeout: 20m
      retries: 20
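    # NOTE: the generous timeout/retries are presumably there to give the API
    # time to download the requested models on first start, so that localagent
    # is only released once the API is actually ready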
    ports:
      - 8081:8080
    environment:
      - DEBUG=true
    volumes:
      - ./models:/build/models:cached
    # uncomment the following piece if running with Nvidia GPUs
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
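    # (if you enable the GPU reservation, you will likely also want a CUDA image
    # tag above instead of latest-cpu, e.g. localai/localai:latest-gpu-nvidia-cuda-12;
    # see the image lists linked above for the available tags)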
  localagent:
    depends_on:
      api:
        condition: service_healthy
    build:
      context: .
      dockerfile: Dockerfile.webui
    ports:
      - 8080:3000
    environment:
      - LOCALAGENT_MODEL=marco-o1
      - EMBEDDING_MODEL=bert-embeddings
      - LOCALAGENT_LLM_API_URL=http://api:8080
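      # http://api:8080 is the in-network address (compose service name);
      # from the host the same API is published on port 8081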
    volumes:
      - ./data/:/pool
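# Start everything with `docker compose up -d`; once the api container reports
# healthy (e.g. `curl http://localhost:8081/readyz` from the host), the web UI
# should be reachable on http://localhost:8080.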