From 367b81405852be6f9666caac8b6d038ab526cf15 Mon Sep 17 00:00:00 2001
From: Alexi C
Date: Fri, 30 May 2025 11:20:00 +0200
Subject: [PATCH] Initial commit: LocalAI P2P addons

---
 .gitea/workflows/__builder.yaml | 67 +++++++++++++++++++++++++++++++++
 .gitea/workflows/builder.yaml   | 13 +++++++
 CHANGELOG.md                    |  0
 DOCS.md                         | 29 ++++++++++++++
 README.md                       | 22 +++++++++++
 localai-p2p-master/DOCS.md      | 29 ++++++++++++++
 localai-p2p-master/Dockerfile   | 12 ++++++
 localai-p2p-master/config.yaml  | 32 ++++++++++++++++
 localai-p2p-master/run.sh       | 11 ++++++
 localai-p2p-worker/Dockerfile   | 12 ++++++
 localai-p2p-worker/config.yaml  | 29 ++++++++++++++
 localai-p2p-worker/run.sh       | 11 ++++++
 repository.yaml                 |  4 ++
 workflows/builder.yaml          | 50 ++++++++++++++++++++++++
 14 files changed, 321 insertions(+)
 create mode 100644 .gitea/workflows/__builder.yaml
 create mode 100644 .gitea/workflows/builder.yaml
 create mode 100644 CHANGELOG.md
 create mode 100644 DOCS.md
 create mode 100644 README.md
 create mode 100644 localai-p2p-master/DOCS.md
 create mode 100644 localai-p2p-master/Dockerfile
 create mode 100644 localai-p2p-master/config.yaml
 create mode 100644 localai-p2p-master/run.sh
 create mode 100644 localai-p2p-worker/Dockerfile
 create mode 100644 localai-p2p-worker/config.yaml
 create mode 100644 localai-p2p-worker/run.sh
 create mode 100644 repository.yaml
 create mode 100644 workflows/builder.yaml

diff --git a/.gitea/workflows/__builder.yaml b/.gitea/workflows/__builder.yaml
new file mode 100644
index 0000000..9f01117
--- /dev/null
+++ b/.gitea/workflows/__builder.yaml
@@ -0,0 +1,67 @@
+name: Builder
+
+on:
+  push:
+    branches: [main, master]
+  pull_request:
+    branches: [main, master]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Build addons
+    strategy:
+      matrix:
+        addon: ["localai-p2p-master", "localai-p2p-worker"]
+        arch: ["amd64"]  # Ajoutez d'autres architectures si nécessaire
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to Gitea Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: gitea.votre-domaine.com  # Remplacez par votre instance Gitea
+          username: ${{ gitea.actor }}
+          password: ${{ secrets.GITEA_TOKEN }}
+
+      - name: Extract version from config
+        id: version
+        run: |
+          VERSION=$(grep '^version:' ${{ matrix.addon }}/config.yaml | sed 's/version: //' | tr -d '"')
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+      - name: Build and push addon
+        uses: docker/build-push-action@v5
+        with:
+          context: ./${{ matrix.addon }}
+          platforms: linux/${{ matrix.arch }}
+          push: true
+          tags: |
+            gitea.votre-domaine.com/votre-user/ha-${{ matrix.addon }}:latest
+            gitea.votre-domaine.com/votre-user/ha-${{ matrix.addon }}:${{ steps.version.outputs.version }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+  release:
+    runs-on: ubuntu-latest
+    name: Create release
+    needs: build
+    if: startsWith(github.ref, 'refs/tags/')
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Create Release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITEA_TOKEN }}
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: Release ${{ github.ref }}
+          draft: false
+          prerelease: false
\ No newline at end of file
diff --git a/.gitea/workflows/builder.yaml b/.gitea/workflows/builder.yaml
new file mode 100644
index 0000000..63395a6
--- /dev/null
+++ b/.gitea/workflows/builder.yaml
@@ -0,0 +1,13 @@
+# .gitea/workflows/validate.yaml
+name: Validate
+on: [push, pull_request]
+jobs:
+  validate:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Validate configs
+        run: |
+          for config in */config.yaml; do
+            python3 -c "import yaml; yaml.safe_load(open('$config'))"
+          done
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..e69de29
diff --git a/DOCS.md b/DOCS.md
new file mode 100644
index 0000000..865953d
--- /dev/null
+++ b/DOCS.md
@@ -0,0 +1,29 @@
+# Configuration
+
+## Options
+
+### gpu_layers
+Nombre de couches à décharger sur le GPU (0 = CPU uniquement).
+
+### debug
+Active les logs de débogage.
+
+### models_path
+Chemin vers le dossier des modèles.
+
+## Réseau P2P
+
+Cet add-on crée automatiquement un réseau P2P pour distribuer la charge.
+
+1. Démarrez d'abord l'instance master
+2. Récupérez le token dans les logs ou l'interface web
+3. Configurez les workers avec ce token
+
+## Modèles supportés
+
+- GGUF (llama.cpp)
+- Formats compatibles LocalAI
+
+## Dépannage
+
+Consultez les logs de l'add-on pour identifier les problèmes.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6dc0729
--- /dev/null
+++ b/README.md
@@ -0,0 +1,22 @@
+# LocalAI P2P Add-ons pour Home Assistant
+
+Collection d'add-ons pour LocalAI avec support P2P et fédération.
+
+## Installation
+
+1. Ajoutez ce repository dans Home Assistant :
+   - Supervisor → Add-on Store → ⋮ → Repositories
+   - Ajoutez : `https://git.carriere.cloud/alex/hass-addons`
+
+2. Installez les add-ons :
+   - LocalAI P2P Master (pour machine avec GPU)
+   - LocalAI P2P Worker (pour machines CPU)
+
+## Add-ons disponibles
+
+- **LocalAI P2P Master** : Instance principale avec GPU
+- **LocalAI P2P Worker** : Instance worker CPU
+
+## Configuration
+
+Voir la documentation de chaque add-on.
\ No newline at end of file
diff --git a/localai-p2p-master/DOCS.md b/localai-p2p-master/DOCS.md
new file mode 100644
index 0000000..865953d
--- /dev/null
+++ b/localai-p2p-master/DOCS.md
@@ -0,0 +1,29 @@
+# Configuration
+
+## Options
+
+### gpu_layers
+Nombre de couches à décharger sur le GPU (0 = CPU uniquement).
+
+### debug
+Active les logs de débogage.
+
+### models_path
+Chemin vers le dossier des modèles.
+
+## Réseau P2P
+
+Cet add-on crée automatiquement un réseau P2P pour distribuer la charge.
+
+1. Démarrez d'abord l'instance master
+2. Récupérez le token dans les logs ou l'interface web
+3. Configurez les workers avec ce token
+
+## Modèles supportés
+
+- GGUF (llama.cpp)
+- Formats compatibles LocalAI
+
+## Dépannage
+
+Consultez les logs de l'add-on pour identifier les problèmes.
\ No newline at end of file
diff --git a/localai-p2p-master/Dockerfile b/localai-p2p-master/Dockerfile
new file mode 100644
index 0000000..dc98929
--- /dev/null
+++ b/localai-p2p-master/Dockerfile
@@ -0,0 +1,12 @@
+FROM localai/localai:latest-cpu
+
+
+
+ENV LOCALAI_P2P="true"
+ENV LOCALAI_FEDERATED="true"
+
+
+COPY run.sh /run.sh
+RUN chmod +x /run.sh
+
+CMD [ "/run.sh" ]
diff --git a/localai-p2p-master/config.yaml b/localai-p2p-master/config.yaml
new file mode 100644
index 0000000..1559706
--- /dev/null
+++ b/localai-p2p-master/config.yaml
@@ -0,0 +1,32 @@
+name: "LocalAI P2P Master"
+version: "1.0.1"
+slug: "localai-p2p-master"
+description: "LocalAI with P2P federation support (master node with GPU)"
+url: "https://git.carriere.cloud/alex/hass-addons/tree/main/localai-p2p-master"
+arch:
+  - amd64
+host_network: true
+startup: application
+ports:
+  8080/tcp: 8080
+ports_description:
+  8080/tcp: "LocalAI API port"
+image: "localai/localai:latest"
+environment:
+  LOCALAI_P2P: "true"
+  LOCALAI_FEDERATED: "true"
+map:
+  - type: share
+    read_only: false
+  - type: addon_config
+    read_only: false
+privileged:
+  - SYS_ADMIN
+options:
+  gpu_layers: 99
+  debug: false
+  models_path: "/share/localai/models"
+schema:
+  gpu_layers: "int(0,)"
+  debug: "bool?"
+  models_path: "str?"
diff --git a/localai-p2p-master/run.sh b/localai-p2p-master/run.sh
new file mode 100644
index 0000000..2962fba
--- /dev/null
+++ b/localai-p2p-master/run.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/with-contenv bashio
+CONFIG_PATH=/data/options.json
+
+GPU_LAYERS="$(bashio::config 'gpu_layers')"
+TOKEN="$(bashio::config 'master_token' || echo '')"
+
+if [ ! -z "$TOKEN" ]; then
+  export LOCALAI_P2P_TOKEN="$TOKEN"
+fi
+
+exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
diff --git a/localai-p2p-worker/Dockerfile b/localai-p2p-worker/Dockerfile
new file mode 100644
index 0000000..dc98929
--- /dev/null
+++ b/localai-p2p-worker/Dockerfile
@@ -0,0 +1,12 @@
+FROM localai/localai:latest-cpu
+
+
+
+ENV LOCALAI_P2P="true"
+ENV LOCALAI_FEDERATED="true"
+
+
+COPY run.sh /run.sh
+RUN chmod +x /run.sh
+
+CMD [ "/run.sh" ]
diff --git a/localai-p2p-worker/config.yaml b/localai-p2p-worker/config.yaml
new file mode 100644
index 0000000..295de8a
--- /dev/null
+++ b/localai-p2p-worker/config.yaml
@@ -0,0 +1,29 @@
+name: "LocalAI P2P Worker"
+version: "1.0.2"  # Incrémentez aussi la version
+slug: "localai-p2p-worker"  # ⬅️ IMPORTANT : doit correspondre au nom du dossier
+description: "LocalAI P2P federation worker node"
+image: localai/localai:latest
+arch:
+  - amd64
+host_network: true  # OBLIGATOIRE pour P2P
+startup: application
+ports:
+  8080/tcp: 8081  # Port différent si sur même réseau
+environment:
+  LOCALAI_P2P: "true"
+  LOCALAI_FEDERATED: "true"
+  THREADS: 8  # Utiliser tous les cœurs du 5800U
+  OMP_NUM_THREADS: 8
+map:
+  - type: share
+    read_only: false
+  - type: addon_config
+    read_only: false
+options:
+  master_token: ""
+  gpu_layers: 0
+  debug: false
+schema:
+  master_token: str
+  gpu_layers: "int(0,)"
+  debug: "bool?"
diff --git a/localai-p2p-worker/run.sh b/localai-p2p-worker/run.sh
new file mode 100644
index 0000000..2962fba
--- /dev/null
+++ b/localai-p2p-worker/run.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/with-contenv bashio
+CONFIG_PATH=/data/options.json
+
+GPU_LAYERS="$(bashio::config 'gpu_layers')"
+TOKEN="$(bashio::config 'master_token' || echo '')"
+
+if [ ! -z "$TOKEN" ]; then
+  export LOCALAI_P2P_TOKEN="$TOKEN"
+fi
+
+exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
diff --git a/repository.yaml b/repository.yaml
new file mode 100644
index 0000000..ad47e1d
--- /dev/null
+++ b/repository.yaml
@@ -0,0 +1,4 @@
+name: "My Add-ons"
+url: "https://git.carriere.cloud/alex/hass-addons"
+maintainer: "Alex"
+description: "Collection d'addons LocalAI avec support P2P pour Home Assistant"
diff --git a/workflows/builder.yaml b/workflows/builder.yaml
new file mode 100644
index 0000000..d9bd7ae
--- /dev/null
+++ b/workflows/builder.yaml
@@ -0,0 +1,50 @@
+name: Builder
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  init:
+    runs-on: ubuntu-latest
+    name: Initialize builds
+    outputs:
+      changed_addons: ${{ steps.changed_addons.outputs.addons }}
+      changed: ${{ steps.changed_addons.outputs.changed }}
+    steps:
+      - name: Check out the repository
+        uses: actions/checkout@v4
+
+      - name: Get changed add-ons
+        id: changed_addons
+        uses: home-assistant/actions/helpers/changed-addons@master
+
+  build:
+    runs-on: ubuntu-latest
+    name: Build ${{ matrix.addon }} (${{ matrix.arch }})
+    needs: init
+    if: needs.init.outputs.changed == 'true'
+    strategy:
+      matrix:
+        addon: ${{ fromJson(needs.init.outputs.changed_addons) }}
+        arch: ["aarch64", "amd64", "armhf", "armv7", "i386"]
+
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Get information
+        id: info
+        uses: home-assistant/actions/helpers/info@master
+        with:
+          path: "./${{ matrix.addon }}"
+
+      - name: Build ${{ matrix.addon }} (${{ matrix.arch }})
+        uses: home-assistant/builder@master
+        with:
+          args: |
+            --${{ matrix.arch }} \
+            --target /data/${{ matrix.addon }} \
+            --generic ${{ steps.info.outputs.version }}
\ No newline at end of file