minor fixes
@@ -17,6 +17,9 @@ environment:
   LOCALAI_P2P: "true"
   LOCALAI_FEDERATED: "true"
   LOCALAI_MODELS_PATH: "/share/localai/models"
+  LOCALAI_ADDRESS: "0.0.0.0:8080"
+  LOCALAI_CORS: "true"
+  LOCALAI_CORS_ALLOW_ORIGINS: "*"
 map:
   - type: share
     read_only: false
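Once the add-on restarts with these variables, a CORS preflight request against LocalAI's OpenAI-compatible API is a quick sanity check. This is only a sketch: the host name homeassistant.local and the externally reachable port 8080 are assumptions, not something this commit guarantees.

curl -i -X OPTIONS http://homeassistant.local:8080/v1/models \
  -H "Origin: http://example.com" \
  -H "Access-Control-Request-Method: GET"
# With LOCALAI_CORS enabled and the wildcard origin list, the response
# should include an Access-Control-Allow-Origin header.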
@@ -3,15 +3,16 @@ CONFIG_PATH=/data/options.json
 
 GPU_LAYERS="$(bashio::config 'gpu_layers')"
 TOKEN="$(bashio::config 'master_token' || echo '')"
+MODELS_PATH="$(bashio::config 'models_path')"
 
 if [ ! -z "$TOKEN" ]; then
     export LOCALAI_P2P_TOKEN="$TOKEN"
 fi
 
 # Create the models directory if it does not exist
-mkdir -p /share/localai/models
+mkdir -p "$MODELS_PATH"
 
 # Clean up corrupted configuration files (ones containing shell code)
-find /share/localai/models -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
+find "$MODELS_PATH" -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
 
-exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
+exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS" --address="0.0.0.0:8080" --cors --cors-allow-origins="*"
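Because models_path is declared optional in the add-on schema below ("str?"), a possible hardening, not part of this commit, is to fall back to the previous hard-coded path when the option is unset or empty. Depending on how bashio::config reports a missing key (empty output or the literal string "null"), a sketch could look like:

MODELS_PATH="$(bashio::config 'models_path')"
# Hypothetical fallback (not in this commit): keep the old default location
# if the optional models_path option was never set.
if [ -z "$MODELS_PATH" ] || [ "$MODELS_PATH" = "null" ]; then
    MODELS_PATH="/share/localai/models"
fi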
@@ -14,6 +14,9 @@ environment:
   LOCALAI_P2P: "true"
   LOCALAI_FEDERATED: "true"
   LOCALAI_MODELS_PATH: "/share/localai/models"
+  LOCALAI_ADDRESS: "0.0.0.0:8080"
+  LOCALAI_CORS: "true"
+  LOCALAI_CORS_ALLOW_ORIGINS: "*"
   THREADS: "8"  # Use all cores of the 5800U
   OMP_NUM_THREADS: "8"
 map:
@@ -25,7 +28,9 @@ options:
   master_token: ""
   gpu_layers: 0
   debug: false
+  models_path: "/share/localai/models"
 schema:
   master_token: str
   gpu_layers: "int(0,)"
   debug: "bool?"
+  models_path: "str?"
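To confirm what the Supervisor actually hands to the container after this change, the rendered options file referenced by CONFIG_PATH at the top of run.sh can be inspected directly. Assuming jq is available in the image (bashio itself relies on it, but that remains an assumption here):

jq '.models_path' /data/options.json
# Expected output unless the user overrode the option:
# "/share/localai/models"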
@@ -3,15 +3,16 @@ CONFIG_PATH=/data/options.json
 
 GPU_LAYERS="$(bashio::config 'gpu_layers')"
 TOKEN="$(bashio::config 'master_token' || echo '')"
+MODELS_PATH="$(bashio::config 'models_path')"
 
 if [ ! -z "$TOKEN" ]; then
     export LOCALAI_P2P_TOKEN="$TOKEN"
 fi
 
 # Create the models directory if it does not exist
-mkdir -p /share/localai/models
+mkdir -p "$MODELS_PATH"
 
 # Clean up corrupted configuration files (ones containing shell code)
-find /share/localai/models -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
+find "$MODELS_PATH" -name "*.yaml" -exec grep -l "#!/usr/bin/with-contenv bashio" {} \; | xargs rm -f 2>/dev/null || true
 
-exec /build/local-ai run --models-path=/share/localai/models --gpu-layers="$GPU_LAYERS"
+exec /build/local-ai run --models-path="$MODELS_PATH" --gpu-layers="$GPU_LAYERS" --address="0.0.0.0:8080" --cors --cors-allow-origins="*"