Initial commit: LocalAI P2P addons
Some checks failed
Builder / Build addons (localai-p2p-master, amd64) (push) Failing after 1m49s
Builder / Build addons (localai-p2p-worker, amd64) (push) Failing after 9s
Validate / validate (push) Failing after 2s
Builder / Create release (push) Has been skipped

This commit is contained in:
2025-05-30 11:20:00 +02:00
commit 367b814058
14 changed files with 321 additions and 0 deletions

View File

@@ -0,0 +1,67 @@
# Builds every add-on image and pushes it to the Gitea container registry.
name: Builder
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]
jobs:
  build:
    runs-on: ubuntu-latest
    name: Build addons
    strategy:
      matrix:
        addon: ["localai-p2p-master", "localai-p2p-worker"]
        # Add more architectures here if needed.
        arch: ["amd64"]
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Gitea Container Registry
        uses: docker/login-action@v3
        with:
          # Registry of this repository (see README) — was an unusable
          # "votre-domaine.com" placeholder before.
          registry: git.carriere.cloud
          username: ${{ gitea.actor }}
          password: ${{ secrets.GITEA_TOKEN }}
      - name: Extract version from config
        id: version
        run: |
          # Keep only the first field after stripping quotes so a trailing
          # "# comment" on the version line (as in the worker config) cannot
          # leak into the Docker tag and make it invalid.
          VERSION=$(grep '^version:' ${{ matrix.addon }}/config.yaml | sed 's/version: //' | tr -d '"' | awk '{print $1}')
          echo "version=$VERSION" >> $GITHUB_OUTPUT
      - name: Build and push addon
        uses: docker/build-push-action@v5
        with:
          context: ./${{ matrix.addon }}
          platforms: linux/${{ matrix.arch }}
          push: true
          tags: |
            git.carriere.cloud/alex/ha-${{ matrix.addon }}:latest
            git.carriere.cloud/alex/ha-${{ matrix.addon }}:${{ steps.version.outputs.version }}
          # NOTE(review): the "gha" cache backend requires the runner to expose
          # the actions cache service — confirm the Gitea act_runner supports it.
          cache-from: type=gha
          cache-to: type=gha,mode=max
  # Publishes a release when a tag ref is pushed; runs only after every
  # add-on in the build matrix succeeded.
  release:
    runs-on: ubuntu-latest
    name: Create release
    needs: build
    # Only tag pushes produce a release; branch pushes skip this job.
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Create Release
        # NOTE(review): actions/create-release is archived/deprecated upstream
        # and targets the GitHub REST API — confirm it works against this
        # Gitea instance (Gitea's own release-action may be a better fit).
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITEA_TOKEN }}
        with:
          # github.ref is the full ref (refs/tags/vX.Y.Z), not the bare tag name.
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false

View File

@@ -0,0 +1,13 @@
# .gitea/workflows/validate.yaml
# Syntax-checks every add-on config.yaml on each push / pull request.
name: Validate
on: [push, pull_request]
jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Validate configs
        run: |
          # PyYAML is not guaranteed to be preinstalled on the runner.
          python3 -m pip install --quiet pyyaml
          for config in */config.yaml; do
            echo "Validating $config"
            # Pass the path as argv instead of interpolating it into the
            # Python source, so unusual file names cannot break the command.
            python3 -c "import sys, yaml; yaml.safe_load(open(sys.argv[1]))" "$config"
          done

0
CHANGELOG.md Normal file
View File

29
DOCS.md Normal file
View File

@@ -0,0 +1,29 @@
# Configuration
## Options
### gpu_layers
Nombre de couches à décharger sur le GPU (0 = CPU uniquement).
### debug
Active les logs de débogage.
### models_path
Chemin vers le dossier des modèles.
## Réseau P2P
Cet add-on crée automatiquement un réseau P2P pour distribuer la charge.
1. Démarrez d'abord l'instance master
2. Récupérez le token dans les logs ou l'interface web
3. Configurez les workers avec ce token
## Modèles supportés
- GGUF (llama.cpp)
- Formats compatibles LocalAI
## Dépannage
Consultez les logs de l'add-on pour identifier les problèmes.

22
README.md Normal file
View File

@@ -0,0 +1,22 @@
# LocalAI P2P Add-ons pour Home Assistant
Collection d'add-ons pour LocalAI avec support P2P et fédération.
## Installation
1. Ajoutez ce repository dans Home Assistant :
- Supervisor → Add-on Store → ⋮ → Repositories
- Ajoutez : `https://git.carriere.cloud/alex/hass-addons`
2. Installez les add-ons :
- LocalAI P2P Master (pour machine avec GPU)
- LocalAI P2P Worker (pour machines CPU)
## Add-ons disponibles
- **LocalAI P2P Master** : Instance principale avec GPU
- **LocalAI P2P Worker** : Instance worker CPU
## Configuration
Voir la documentation de chaque add-on.

View File

@@ -0,0 +1,29 @@
# Configuration
## Options
### gpu_layers
Nombre de couches à décharger sur le GPU (0 = CPU uniquement).
### debug
Active les logs de débogage.
### models_path
Chemin vers le dossier des modèles.
## Réseau P2P
Cet add-on crée automatiquement un réseau P2P pour distribuer la charge.
1. Démarrez d'abord l'instance master
2. Récupérez le token dans les logs ou l'interface web
3. Configurez les workers avec ce token
## Modèles supportés
- GGUF (llama.cpp)
- Formats compatibles LocalAI
## Dépannage
Consultez les logs de l'add-on pour identifier les problèmes.

View File

@@ -0,0 +1,12 @@
# LocalAI P2P master add-on image.
# NOTE(review): this is the CPU-only base image, yet the master add-on is
# described as the GPU node (config sets gpu_layers: 99) — confirm whether a
# CUDA/GPU variant of the localai image should be used here instead.
FROM localai/localai:latest-cpu

# Enable LocalAI's peer-to-peer and federation modes at the image level.
ENV LOCALAI_P2P="true"
ENV LOCALAI_FEDERATED="true"

# Entry script reads add-on options via bashio and launches local-ai.
COPY run.sh /run.sh
RUN chmod +x /run.sh
CMD [ "/run.sh" ]

View File

@@ -0,0 +1,32 @@
---
name: "LocalAI P2P Master"
version: "1.0.1"
slug: "localai-p2p-master"
description: "LocalAI with P2P federation support (master node with GPU)"
url: "https://git.carriere.cloud/alex/hass-addons/tree/main/localai-p2p-master"
arch:
  - amd64
# Required so the P2P layer can reach peers on the LAN directly.
host_network: true
startup: application
ports:
  8080/tcp: 8080
ports_description:
  8080/tcp: "LocalAI API port"
# NOTE(review): when `image` is set, the Supervisor pulls this prebuilt image
# and the add-on's local Dockerfile is not built — confirm which is intended.
image: "localai/localai:latest"
# NOTE(review): `environment` is not part of the documented add-on config
# schema; run.sh exports what local-ai needs — verify these are applied.
environment:
  LOCALAI_P2P: "true"
  LOCALAI_FEDERATED: "true"
map:
  - type: share
    read_only: false
  - type: addon_config
    read_only: false
privileged:
  - SYS_ADMIN
options:
  gpu_layers: 99
  debug: false
  models_path: "/share/localai/models"
  # Read by run.sh (bashio::config 'master_token'); was missing from the
  # options/schema, so configuring it through the UI was impossible.
  master_token: ""
schema:
  gpu_layers: "int(0,)"
  debug: "bool?"
  models_path: "str?"
  master_token: "str?"

11
localai-p2p-master/run.sh Normal file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/with-contenv bashio
# LocalAI P2P master entry point. Reads add-on options (gpu_layers,
# models_path, master_token) via bashio and execs local-ai.

GPU_LAYERS="$(bashio::config 'gpu_layers')"

# Honor the models_path option instead of hardcoding its default; bashio/jq
# may print the literal string "null" for unset options, so treat that as
# unset too.
MODELS_PATH="$(bashio::config 'models_path')"
if [ -z "$MODELS_PATH" ] || [ "$MODELS_PATH" = "null" ]; then
    MODELS_PATH="/share/localai/models"
fi
mkdir -p "$MODELS_PATH"

# Only export the P2P token when one was actually configured; exporting the
# "null" placeholder would feed a bogus token to the P2P layer.
TOKEN="$(bashio::config 'master_token')"
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
    export LOCALAI_P2P_TOKEN="$TOKEN"
fi

exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS"

View File

@@ -0,0 +1,12 @@
# LocalAI P2P worker add-on image (CPU-only node, so the CPU base is correct).
FROM localai/localai:latest-cpu

# Enable LocalAI's peer-to-peer and federation modes at the image level.
ENV LOCALAI_P2P="true"
ENV LOCALAI_FEDERATED="true"

# Entry script reads add-on options via bashio and launches local-ai.
COPY run.sh /run.sh
RUN chmod +x /run.sh
CMD [ "/run.sh" ]

View File

@@ -0,0 +1,29 @@
---
name: "LocalAI P2P Worker"
version: "1.0.2"  # Bump on every change so the Supervisor offers the update.
slug: "localai-p2p-worker"  # IMPORTANT: must match the add-on directory name.
description: "LocalAI P2P federation worker node"
# NOTE(review): when `image` is set, the Supervisor pulls this prebuilt image
# and the add-on's local Dockerfile is not built — confirm which is intended.
image: localai/localai:latest
arch:
  - amd64
# Required for P2P. With host networking the `ports` mapping below is
# informational only — the container binds host ports directly.
host_network: true
startup: application
ports:
  8080/tcp: 8081  # Different port in case master and worker share a host.
ports_description:
  8080/tcp: "LocalAI API port"
# NOTE(review): `environment` is not part of the documented add-on config
# schema — verify these values are actually applied to the container.
environment:
  LOCALAI_P2P: "true"
  LOCALAI_FEDERATED: "true"
  # Quoted so YAML tooling keeps them as strings; sized for the target
  # worker CPU (8 cores on the 5800U).
  THREADS: "8"
  OMP_NUM_THREADS: "8"
map:
  - type: share
    read_only: false
  - type: addon_config
    read_only: false
options:
  master_token: ""
  gpu_layers: 0
  debug: false
schema:
  master_token: str
  gpu_layers: "int(0,)"
  debug: "bool?"

11
localai-p2p-worker/run.sh Normal file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/with-contenv bashio
# LocalAI P2P worker entry point. Reads add-on options (gpu_layers,
# master_token) via bashio and execs local-ai.

GPU_LAYERS="$(bashio::config 'gpu_layers')"

# models_path is not declared in the worker options today; fall back to the
# previous hardcoded default. bashio/jq may print the literal string "null"
# for unset options, so treat that as unset too.
MODELS_PATH="$(bashio::config 'models_path')"
if [ -z "$MODELS_PATH" ] || [ "$MODELS_PATH" = "null" ]; then
    MODELS_PATH="/share/localai/models"
fi
mkdir -p "$MODELS_PATH"

# Only export the P2P token when one was actually configured; exporting the
# "null" placeholder would feed a bogus token to the P2P layer.
TOKEN="$(bashio::config 'master_token')"
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
    export LOCALAI_P2P_TOKEN="$TOKEN"
fi

exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS"

4
repository.yaml Normal file
View File

@@ -0,0 +1,4 @@
# Home Assistant add-on repository metadata; the Supervisor reads this file
# when the repository URL is added in the add-on store.
name: "My Add-ons"
url: "https://git.carriere.cloud/alex/hass-addons"
maintainer: "Alex"
description: "Collection d'addons LocalAI avec support P2P pour Home Assistant"

50
workflows/builder.yaml Normal file
View File

@@ -0,0 +1,50 @@
# NOTE(review): this file lives at workflows/builder.yaml — Gitea only runs
# workflows under .gitea/workflows/ — and its `name: Builder` collides with
# the existing Builder workflow; move/rename or remove one of the two.
name: Builder
on:
  push:
    # Trigger on master too, matching the other workflows in this repo.
    branches: [main, master]
  pull_request:
    branches: [main, master]
jobs:
  # Determines which add-on directories changed so only those are rebuilt.
  init:
    runs-on: ubuntu-latest
    name: Initialize builds
    outputs:
      # JSON list of changed add-on directories, consumed by the build matrix.
      changed_addons: ${{ steps.changed_addons.outputs.addons }}
      # "true" when at least one add-on changed; gates the build job.
      changed: ${{ steps.changed_addons.outputs.changed }}
    steps:
      - name: Check out the repository
        uses: actions/checkout@v4
      - name: Get changed add-ons
        id: changed_addons
        uses: home-assistant/actions/helpers/changed-addons@master
build:
runs-on: ubuntu-latest
name: Build ${{ matrix.addon }} (${{ matrix.arch }})
needs: init
if: needs.init.outputs.changed == 'true'
strategy:
matrix:
addon: ${{ fromJson(needs.init.outputs.changed_addons) }}
arch: ["aarch64", "amd64", "armhf", "armv7", "i386"]
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Get information
id: info
uses: home-assistant/actions/helpers/info@master
with:
path: "./${{ matrix.addon }}"
- name: Build ${{ matrix.addon }} (${{ matrix.arch }})
uses: home-assistant/builder@master
with:
args: |
--${{ matrix.arch }} \
--target /data/${{ matrix.addon }} \
--generic ${{ steps.info.outputs.version }}