diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..bfce180
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,45 @@
+version: "3.9"  # NOTE: the top-level `version` key is obsolete (ignored) in Compose V2
+services:
+  api:
+    image: localai/localai:latest-aio-cpu
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
+      interval: 10s
+      # Deliberately generous: the AIO image downloads models on first start,
+      # which can take many minutes before /readyz succeeds.
+      timeout: 20m
+      retries: 20
+    # ports:
+    #   - "8080:8080"
+    environment:
+      - DEBUG=true
+    volumes:
+      - ./models:/build/models:cached
+    # uncomment the following piece if running with Nvidia GPUs
+    # deploy:
+    #   resources:
+    #     reservations:
+    #       devices:
+    #         - driver: nvidia
+    #           count: 1
+    #           capabilities: [gpu]
+  localagent:
+    depends_on:
+      api:
+        condition: service_healthy
+    # See https://localai.io/basics/getting_started/#container-images for
+    # a list of available container images (or build your own with the provided Dockerfile)
+    # Available images with CUDA, ROCm, SYCL
+    # Image list (quay.io): https://quay.io/repository/go-skynet/local-ai?tab=tags
+    # Image list (dockerhub): https://hub.docker.com/r/localai/localai
+    build:
+      context: .
+      dockerfile: Dockerfile.webui
+    ports:
+      # quoted to sidestep YAML implicit-typing traps on HOST:CONTAINER strings
+      - "8080:3000"
+    environment:
+      - TEST_MODEL=gpt-4
+      - API_URL=http://api:8080
+    volumes:
+      - ./data/:/pool