#!/bin/bash
# LocalAI P2P worker startup script (Home Assistant add-on).
# Reads the add-on options from /data/options.json, exports the matching
# environment variables, then replaces itself with the LocalAI server.
set -euo pipefail

echo "Starting LocalAI P2P Worker"

# Read the Home Assistant add-on configuration directly with jq.
# Every jq call falls back to a sane default if the file or key is missing.
if [ -f /data/options.json ]; then
    echo "Reading Home Assistant configuration..."
    TOKEN="$(jq -r '.master_token // ""' /data/options.json 2>/dev/null || echo '')"
    GPU_LAYERS="$(jq -r '.gpu_layers // 0' /data/options.json 2>/dev/null || echo '0')"
    DEBUG="$(jq -r '.debug // false' /data/options.json 2>/dev/null || echo 'false')"
    MODELS_PATH="$(jq -r '.models_path // "/share/localai/models"' /data/options.json 2>/dev/null || echo '/share/localai/models')"
    THREADS="$(jq -r '.threads // 8' /data/options.json 2>/dev/null || echo '8')"

    echo "Configuration loaded:"
    # Only the first 20 characters of the token are logged, never the full secret.
    echo "- Master Token: ${TOKEN:0:20}..."
    echo "- GPU Layers: $GPU_LAYERS"
    echo "- Debug: $DEBUG"
    echo "- Models Path: $MODELS_PATH"
    echo "- Threads: $THREADS"
else
    echo "No Home Assistant config found, using defaults"
    TOKEN=""
    GPU_LAYERS="0"
    DEBUG="false"
    MODELS_PATH="/share/localai/models"
    THREADS="8"
fi

# Configure the P2P token if one was provided; otherwise LocalAI generates one.
# jq prints the literal string "null" for a JSON null, so filter that out too.
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
    export LOCALAI_P2P_TOKEN="$TOKEN"
    export P2P_TOKEN="$TOKEN"
    echo "P2P Token configured"
else
    echo "No P2P token provided - will generate one"
fi

# Make sure the models directory exists.
mkdir -p "$MODELS_PATH"

# Clean up corrupted model YAML files (e.g. leftovers of a bashio-based start
# script, or files containing a YAML parser error message). Best-effort:
# failures are ignored. 'xargs -r' avoids running 'rm -f' with no arguments.
find "$MODELS_PATH" -name "*.yaml" \
    -exec grep -l "#!/usr/bin/with-contenv bashio\|mapping values are not allowed in this context" {} \; 2>/dev/null \
    | xargs -r rm -f 2>/dev/null || true

# Export threading configuration for LocalAI and OpenMP-based backends.
export THREADS="$THREADS"
export OMP_NUM_THREADS="$THREADS"

# Enable verbose logging when requested.
if [ "$DEBUG" = "true" ]; then
    export LOCALAI_DEBUG="true"
    export LOCALAI_LOG_LEVEL="debug"
    echo "Debug mode enabled"
fi

# Offload layers to the GPU only when explicitly configured (> 0).
# stderr is silenced so a non-numeric value simply skips the GPU setup
# instead of printing a test error.
if [ "$GPU_LAYERS" -gt 0 ] 2>/dev/null; then
    export LOCALAI_GPU_LAYERS="$GPU_LAYERS"
    echo "GPU layers configured: $GPU_LAYERS"
fi

# Replace this shell with the LocalAI server process.
echo "Starting LocalAI with models path: $MODELS_PATH"
exec /build/local-ai run \
    --models-path="$MODELS_PATH" \
    --threads="$THREADS" \
    --address="0.0.0.0:8080" \
    --cors \
    --cors-allow-origins="*"