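# Home Assistant add-on configuration (config.yaml) for the LocalAI P2P worker node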
name: "LocalAI P2P Worker"
version: "1.0.3.5"  # Also increment the version
slug: "localai-p2p-worker"  # ⬅️ IMPORTANT: must match the add-on folder name
description: "LocalAI P2P federation worker node"
arch:
  - amd64
host_network: true  # REQUIRED for P2P
startup: application
ingress: true
ingress_port: 8080
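# Host port mappings: 8080 exposes the LocalAI API, 9090 carries the P2P traffic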
ports:
  8080/tcp: 8080  # Use a different port if on the same network
  9090/tcp: 9090  # P2P TCP port
  9090/udp: 9090  # P2P UDP port
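# Environment variables the add-on container is started with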
environment:
  LOCALAI_P2P: "true"
  LOCALAI_FEDERATED: "true"
  LOCALAI_MODELS_PATH: "/share/localai/models"
  LOCALAI_ADDRESS: "0.0.0.0:8080"
  LOCALAI_CORS: "true"
  LOCALAI_CORS_ALLOW_ORIGINS: "*"
  LOCALAI_P2P_LISTEN_PORT: "9090"
  THREADS: "8"  # Use all cores of the 5800U
  OMP_NUM_THREADS: "8"
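# Host directories mounted into the add-on container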
map:
  - type: share
    read_only: false
  - type: addon_config
    read_only: false
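# Default option values shown in the add-on's configuration panel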
options:
  master_token: ""
  gpu_layers: 0
  debug: false
  models_path: "/share/localai/models"
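# Validation schema for the options above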
schema:
  master_token: str
  gpu_layers: "int(0,)"
  debug: "bool?"
  models_path: "str?"
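# Quick sanity check once the add-on is running, assuming the port mapping above
# (replace <worker-ip> with the host's address):
#   curl http://<worker-ip>:8080/v1/models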