diff --git a/localai-p2p-master/config.yaml b/localai-p2p-master/config.yaml
index a05766f..5758218 100644
--- a/localai-p2p-master/config.yaml
+++ b/localai-p2p-master/config.yaml
@@ -17,6 +17,9 @@ environment:
   LOCALAI_P2P: "true"
   LOCALAI_FEDERATED: "true"
   LOCALAI_MODELS_PATH: "/share/localai/models"
+  LOCALAI_ADDRESS: "0.0.0.0:8080"
+  LOCALAI_CORS: "true"
+  LOCALAI_CORS_ALLOW_ORIGINS: "*"
 map:
   - type: share
     read_only: false
diff --git a/localai-p2p-master/run.sh b/localai-p2p-master/run.sh
index 73e6f5d..9059a06 100644
--- a/localai-p2p-master/run.sh
+++ b/localai-p2p-master/run.sh
@@ -3,15 +3,16 @@
 CONFIG_PATH=/data/options.json
 GPU_LAYERS="$(bashio::config 'gpu_layers')"
 TOKEN="$(bashio::config 'master_token' || echo '')"
+MODELS_PATH="$(bashio::config 'models_path')"
 
 if [ ! -z "$TOKEN" ]; then
     export LOCALAI_P2P_TOKEN="$TOKEN"
 fi
 
 # Créer le répertoire des modèles s'il n'existe pas
-mkdir -p /share/localai/models
+mkdir -p "$MODELS_PATH"
 
 # Nettoyer les fichiers de configuration corrompus (contenant du code shell)
-find /share/localai/models -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
+find "$MODELS_PATH" -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
 
-exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
+exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS" --address="0.0.0.0:8080" --cors --cors-allow-origins="*"
diff --git a/localai-p2p-worker/config.yaml b/localai-p2p-worker/config.yaml
index 46013f9..2212e8d 100644
--- a/localai-p2p-worker/config.yaml
+++ b/localai-p2p-worker/config.yaml
@@ -14,6 +14,9 @@ environment:
   LOCALAI_P2P: "true"
   LOCALAI_FEDERATED: "true"
   LOCALAI_MODELS_PATH: "/share/localai/models"
+  LOCALAI_ADDRESS: "0.0.0.0:8080"
+  LOCALAI_CORS: "true"
+  LOCALAI_CORS_ALLOW_ORIGINS: "*"
   THREADS: "8" # Utiliser tous les cœurs du 5800U
   OMP_NUM_THREADS: "8"
 map:
@@ -25,7 +28,9 @@ options:
   master_token: ""
   gpu_layers: 0
   debug: false
+  models_path: "/share/localai/models"
 schema:
   master_token: str
   gpu_layers: "int(0,)"
   debug: "bool?"
+  models_path: "str?"
diff --git a/localai-p2p-worker/run.sh b/localai-p2p-worker/run.sh
index 73e6f5d..9059a06 100644
--- a/localai-p2p-worker/run.sh
+++ b/localai-p2p-worker/run.sh
@@ -3,15 +3,16 @@
 CONFIG_PATH=/data/options.json
 GPU_LAYERS="$(bashio::config 'gpu_layers')"
 TOKEN="$(bashio::config 'master_token' || echo '')"
+MODELS_PATH="$(bashio::config 'models_path')"
 
 if [ ! -z "$TOKEN" ]; then
     export LOCALAI_P2P_TOKEN="$TOKEN"
 fi
 
 # Créer le répertoire des modèles s'il n'existe pas
-mkdir -p /share/localai/models
+mkdir -p "$MODELS_PATH"
 
 # Nettoyer les fichiers de configuration corrompus (contenant du code shell)
-find /share/localai/models -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
+find "$MODELS_PATH" -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
 
-exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
+exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS" --address="0.0.0.0:8080" --cors --cors-allow-origins="*"