#!/bin/bash
# LocalAI P2P worker entrypoint for a Home Assistant add-on.
# Reads the add-on options, exports the LocalAI environment, cleans up
# corrupted model configs, then execs the LocalAI server.

echo "Starting LocalAI P2P Worker"
# Source bashio to get the Home Assistant configuration helpers
# (bashio::config). Guarded so the script degrades gracefully when run
# outside the add-on environment instead of printing a source error.
if [ -f /usr/lib/bashio/bashio ]; then
    # shellcheck disable=SC1091 — provided by the HA base image at runtime
    source /usr/lib/bashio/bashio
fi

# Read the Home Assistant add-on configuration; every option falls back
# to a sane default when missing or when bashio::config fails.
if [ -f /data/options.json ]; then
    TOKEN="$(bashio::config 'master_token' 2>/dev/null || echo '')"
    GPU_LAYERS="$(bashio::config 'gpu_layers' 2>/dev/null || echo '0')"
    DEBUG="$(bashio::config 'debug' 2>/dev/null || echo 'false')"
    MODELS_PATH="$(bashio::config 'models_path' 2>/dev/null || echo '/share/localai/models')"
    THREADS="$(bashio::config 'threads' 2>/dev/null || echo '8')"

    # The master token is a shared secret — report presence, never the value.
    if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
        echo "Master Token: (set)"
    else
        echo "Master Token: (not set)"
    fi
    echo "GPU Layers: $GPU_LAYERS"
    echo "Debug: $DEBUG"
    echo "Models Path: $MODELS_PATH"
    echo "Threads: $THREADS"
else
    echo "No Home Assistant config found, using defaults"
    TOKEN=""
    GPU_LAYERS="0"
    DEBUG="false"
    MODELS_PATH="/share/localai/models"
    THREADS="8"
fi
# Configure the P2P token when one was provided.
# "null" is what bashio::config returns for an unset option, so it is
# treated the same as empty. The third test from the original
# (`[ "$TOKEN" != "" ]`) was redundant with -n and has been dropped.
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
    export LOCALAI_P2P_TOKEN="$TOKEN"
    export P2P_TOKEN="$TOKEN"
    echo "P2P Token configured"
else
    echo "No P2P token provided - will generate one"
fi
# Create the models directory.
mkdir -p "$MODELS_PATH"

# Remove corrupted model YAML files — ones polluted with a stray shell
# shebang or a YAML parser error message. NUL-delimited so paths with
# spaces/newlines survive; `xargs -r` skips rm entirely when nothing
# matched (the original newline-based `grep -l | xargs rm` broke on
# paths containing spaces).
find "$MODELS_PATH" -name "*.yaml" \
    -exec grep -q "#!/usr/bin/with-contenv bashio\|mapping values are not allowed in this context" {} \; \
    -print0 2>/dev/null \
    | xargs -0 -r rm -f -- 2>/dev/null || true
# Publish the thread count so both LocalAI ($THREADS) and OpenMP-based
# backends ($OMP_NUM_THREADS) pick it up.
export THREADS="${THREADS}" OMP_NUM_THREADS="${THREADS}"
# Turn on verbose logging when the add-on's debug option is enabled.
case "$DEBUG" in
    true)
        export LOCALAI_DEBUG="true"
        export LOCALAI_LOG_LEVEL="debug"
        echo "Debug mode enabled"
        ;;
esac
# Configure GPU offload layers when a positive count was given.
# Validate the value is a plain integer first: bashio can hand back
# "null" or an empty string, and the original `[ ... -gt 0 ]` emitted
# "integer expression expected" errors on such values.
if [[ "$GPU_LAYERS" =~ ^[0-9]+$ ]] && (( GPU_LAYERS > 0 )); then
    export LOCALAI_GPU_LAYERS="$GPU_LAYERS"
    echo "GPU layers configured: $GPU_LAYERS"
fi
# Launch LocalAI, replacing this shell with the server process so
# signals from the supervisor reach it directly.
echo "Starting LocalAI with models path: $MODELS_PATH"

launch_args=(
    run
    --models-path="$MODELS_PATH"
    --threads="$THREADS"
    --address="0.0.0.0:8080"
    --cors
    --cors-allow-origins="*"
)
exec /build/local-ai "${launch_args[@]}"