Initial commit: LocalAI P2P addons
This commit is contained in:
29
localai-p2p-master/DOCS.md
Normal file
29
localai-p2p-master/DOCS.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Configuration
|
||||
|
||||
## Options
|
||||
|
||||
### gpu_layers
|
||||
Nombre de couches à décharger sur le GPU (0 = CPU uniquement).
|
||||
|
||||
### debug
|
||||
Active les logs de débogage.
|
||||
|
||||
### models_path
|
||||
Chemin vers le dossier des modèles.
|
||||
|
||||
## Réseau P2P
|
||||
|
||||
Cet add-on crée automatiquement un réseau P2P pour distribuer la charge.
|
||||
|
||||
1. Démarrez d'abord l'instance master
|
||||
2. Récupérez le token dans les logs ou l'interface web
|
||||
3. Configurez les workers avec ce token
|
||||
|
||||
## Modèles supportés
|
||||
|
||||
- GGUF (llama.cpp)
|
||||
- Formats compatibles LocalAI
|
||||
|
||||
## Dépannage
|
||||
|
||||
Consultez les logs de l'add-on pour identifier les problèmes.
|
||||
12
localai-p2p-master/Dockerfile
Normal file
12
localai-p2p-master/Dockerfile
Normal file
@@ -0,0 +1,12 @@
|
||||
# syntax=docker/dockerfile:1

# Base: upstream LocalAI CPU build.
# NOTE(review): ":latest-cpu" is a moving tag — pin a specific release
# (e.g. vX.Y.Z-cpu) or a digest for reproducible builds.
FROM localai/localai:latest-cpu

# Enable peer-to-peer mode and federation so this node can participate
# as a master in a distributed LocalAI cluster (read by local-ai at startup).
ENV LOCALAI_P2P="true" \
    LOCALAI_FEDERATED="true"

# Install the add-on entrypoint; --chmod sets the execute bit in the same
# layer instead of a follow-up RUN chmod (which would duplicate the file
# in an extra layer).
COPY --chmod=755 run.sh /run.sh

CMD [ "/run.sh" ]
|
||||
32
localai-p2p-master/config.yaml
Normal file
32
localai-p2p-master/config.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
name: "LocalAI P2P Master"
version: "1.0.1"
slug: "localai-p2p-master"
description: "LocalAI with P2P federation support (master node with GPU)"
url: "https://git.carriere.cloud/alex/hass-addons/tree/main/localai-p2p-master"
arch:
  - amd64
# Host networking so P2P peers can reach this node directly.
host_network: true
startup: application
ports:
  8080/tcp: 8080
ports_description:
  8080/tcp: "LocalAI API port"
# NOTE(review): declaring 'image' makes the Supervisor pull this prebuilt
# image instead of building the local Dockerfile (which sets the P2P env
# vars and installs run.sh) — confirm this is intended.
image: "localai/localai:latest"
environment:
  LOCALAI_P2P: "true"
  LOCALAI_FEDERATED: "true"
map:
  - type: share
    read_only: false
  - type: addon_config
    read_only: false
privileged:
  - SYS_ADMIN
options:
  gpu_layers: 99
  debug: false
  models_path: "/share/localai/models"
schema:
  gpu_layers: "int(0,)"
  debug: "bool?"
  models_path: "str?"
  # run.sh reads 'master_token' via bashio::config; it must be declared
  # here or a user-supplied token can never be resolved.
  master_token: "str?"
|
||||
11
localai-p2p-master/run.sh
Normal file
11
localai-p2p-master/run.sh
Normal file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/with-contenv bashio
# LocalAI P2P master entrypoint: reads add-on options and launches local-ai.

# GPU offload layer count from add-on options (0 = CPU only).
GPU_LAYERS="$(bashio::config 'gpu_layers')"

# Models directory is configurable via the 'models_path' option; fall back
# to the documented default when the option is unset or empty.
MODELS_PATH="$(bashio::config 'models_path' || echo '')"
if [ -z "$MODELS_PATH" ] || [ "$MODELS_PATH" = "null" ]; then
    MODELS_PATH="/share/localai/models"
fi

# Optional P2P token: when provided, this node joins an existing P2P
# network instead of generating a fresh token.
# NOTE(review): 'master_token' must be declared in config.yaml's schema
# for bashio::config to resolve it — confirm the option name.
TOKEN="$(bashio::config 'master_token' || echo '')"
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
    export LOCALAI_P2P_TOKEN="$TOKEN"
fi

# exec so local-ai replaces the shell, becomes PID 1, and receives stop
# signals directly from the container runtime.
exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS"
|
||||
Reference in New Issue
Block a user