Compare commits
16 Commits
| Author | SHA1 | Date |
|---|---|---|
| | b6bd53b01a | |
| | 986b1949cd | |
| | 1e81e4db53 | |
| | 23aecd372e | |
| | db53f27a1a | |
| | c83e9a859b | |
| | 02fd70726b | |
| | 9d50395dc5 | |
| | 9d125a87d9 | |
| | 61e930bf8a | |
| | 4db60b6a6f | |
| | 69e9c7de55 | |
| | e96fa163cd | |
| | cfef80e1e5 | |
| | 9b74a4354b | |
| | fca193b5b2 | |
@@ -1 +0,0 @@
NODE_ENV=development\nOPENAI_API_KEY=your_openai_api_key_here\nHASS_HOST=http://homeassistant.local:8123\nHASS_TOKEN=your_hass_token_here\nPORT=3000\nHASS_SOCKET_URL=ws://homeassistant.local:8123/api/websocket\nLOG_LEVEL=debug\nMCP_SERVER=http://localhost:3000\nOPENAI_MODEL=deepseek-v3\nMAX_RETRIES=3\nANALYSIS_TIMEOUT=30000\n\n# Home Assistant specific settings\nAUTOMATION_PATH=./config/automations.yaml\nBLUEPRINT_REPO=https://blueprints.home-assistant.io/\nENERGY_DASHBOARD=true\n\n# Available models: gpt-4o, gpt-4-turbo, gpt-4, gpt-4-o1, gpt-4-o3, gpt-3.5-turbo, gpt-3.5-turbo-16k, deepseek-v3, deepseek-r1\n\n# For DeepSeek models\nDEEPSEEK_API_KEY=your_deepseek_api_key_here\nDEEPSEEK_BASE_URL=https://api.deepseek.com/v1\n\n# Model specifications:\n# - gpt-4-o1: 128k context, general purpose\n# - gpt-4-o3: 1M context, large-scale analysis\n\n# Add processor type specification\nPROCESSOR_TYPE=claude # Change to openai when using OpenAI
127  .env.example
@@ -1,43 +1,16 @@
|
||||
# Server Configuration
|
||||
NODE_ENV=development
|
||||
PORT=3000
|
||||
DEBUG=false
|
||||
LOG_LEVEL=info
|
||||
MCP_SERVER=http://localhost:3000
|
||||
|
||||
# Home Assistant Configuration
|
||||
# The URL of your Home Assistant instance
|
||||
HASS_HOST=http://homeassistant.local:8123
|
||||
|
||||
# Long-lived access token from Home Assistant
|
||||
# Generate from Profile -> Long-Lived Access Tokens
|
||||
HASS_TOKEN=your_home_assistant_token
|
||||
|
||||
# WebSocket URL for real-time updates
|
||||
HASS_TOKEN=your_long_lived_token
|
||||
HASS_SOCKET_URL=ws://homeassistant.local:8123/api/websocket
|
||||
|
||||
# Server Configuration
|
||||
# Port for the MCP server (default: 3000)
|
||||
PORT=3000
|
||||
|
||||
# Environment (development/production/test)
|
||||
NODE_ENV=development
|
||||
|
||||
# Debug mode (true/false)
|
||||
DEBUG=false
|
||||
|
||||
# Logging level (debug/info/warn/error)
|
||||
LOG_LEVEL=info
|
||||
|
||||
# AI Configuration
|
||||
# Natural Language Processor type (claude/gpt4/custom)
|
||||
PROCESSOR_TYPE=claude
|
||||
|
||||
# OpenAI API Key (required for GPT-4 analysis)
|
||||
OPENAI_API_KEY=your_openai_api_key
|
||||
|
||||
# Rate Limiting
|
||||
# Requests per minute per IP for regular endpoints
|
||||
RATE_LIMIT_REGULAR=100
|
||||
|
||||
# Requests per minute per IP for WebSocket connections
|
||||
RATE_LIMIT_WEBSOCKET=1000
|
||||
|
||||
# Security Configuration
|
||||
# JWT Configuration
|
||||
JWT_SECRET=your_jwt_secret_key_min_32_chars
|
||||
JWT_EXPIRY=86400000
|
||||
JWT_MAX_AGE=2592000000
|
||||
@@ -46,11 +19,8 @@ JWT_ALGORITHM=HS256
|
||||
# Rate Limiting
|
||||
RATE_LIMIT_WINDOW=900000
|
||||
RATE_LIMIT_MAX_REQUESTS=100
|
||||
|
||||
# Token Security
|
||||
TOKEN_MIN_LENGTH=32
|
||||
MAX_FAILED_ATTEMPTS=5
|
||||
LOCKOUT_DURATION=900000
|
||||
RATE_LIMIT_REGULAR=100
|
||||
RATE_LIMIT_WEBSOCKET=1000
|
||||
|
||||
# CORS Configuration
|
||||
CORS_ORIGINS=http://localhost:3000,http://localhost:8123
|
||||
@@ -60,17 +30,6 @@ CORS_EXPOSED_HEADERS=
|
||||
CORS_CREDENTIALS=true
|
||||
CORS_MAX_AGE=86400
|
||||
|
||||
# Content Security Policy
|
||||
CSP_ENABLED=true
|
||||
CSP_REPORT_ONLY=false
|
||||
CSP_REPORT_URI=
|
||||
|
||||
# SSL/TLS Configuration
|
||||
REQUIRE_HTTPS=true
|
||||
HSTS_MAX_AGE=31536000
|
||||
HSTS_INCLUDE_SUBDOMAINS=true
|
||||
HSTS_PRELOAD=true
|
||||
|
||||
# Cookie Security
|
||||
COOKIE_SECRET=your_cookie_secret_key_min_32_chars
|
||||
COOKIE_SECURE=true
|
||||
@@ -81,31 +40,57 @@ COOKIE_SAME_SITE=Strict
|
||||
MAX_REQUEST_SIZE=1048576
|
||||
MAX_REQUEST_FIELDS=1000
|
||||
|
||||
# AI Configuration
|
||||
PROCESSOR_TYPE=openai
|
||||
OPENAI_API_KEY=your_openai_api_key
|
||||
OPENAI_MODEL=gpt-3.5-turbo
|
||||
MAX_RETRIES=3
|
||||
ANALYSIS_TIMEOUT=30000
|
||||
|
||||
# Speech Features Configuration
|
||||
ENABLE_SPEECH_FEATURES=true
|
||||
ENABLE_WAKE_WORD=true
|
||||
ENABLE_SPEECH_TO_TEXT=true
|
||||
WHISPER_MODEL_PATH=/models
|
||||
WHISPER_MODEL_TYPE=base
|
||||
|
||||
# Audio Configuration
|
||||
NOISE_THRESHOLD=0.05
|
||||
MIN_SPEECH_DURATION=1.0
|
||||
SILENCE_DURATION=0.5
|
||||
SAMPLE_RATE=16000
|
||||
CHANNELS=1
|
||||
CHUNK_SIZE=1024
|
||||
PULSE_SERVER=unix:/run/user/1000/pulse/native
|
||||
|
||||
# Whisper Configuration
|
||||
ASR_MODEL=base
|
||||
ASR_ENGINE=faster_whisper
|
||||
WHISPER_BEAM_SIZE=5
|
||||
COMPUTE_TYPE=float32
|
||||
LANGUAGE=en
|
||||
|
||||
# SSE Configuration
|
||||
SSE_MAX_CLIENTS=1000
|
||||
SSE_PING_INTERVAL=30000
|
||||
SSE_MAX_CLIENTS=50
|
||||
SSE_RECONNECT_TIMEOUT=5000
|
||||
|
||||
# Logging Configuration
|
||||
LOG_LEVEL=info
|
||||
LOG_DIR=logs
|
||||
LOG_MAX_SIZE=20m
|
||||
LOG_MAX_DAYS=14d
|
||||
LOG_COMPRESS=true
|
||||
LOG_REQUESTS=true
|
||||
# Development Flags
|
||||
HOT_RELOAD=true
|
||||
|
||||
# Version
|
||||
VERSION=0.1.0
|
||||
|
||||
# Test Configuration
|
||||
# Only needed if running tests
|
||||
# Test Configuration (only needed for running tests)
|
||||
TEST_HASS_HOST=http://localhost:8123
|
||||
TEST_HASS_TOKEN=test_token
|
||||
TEST_HASS_SOCKET_URL=ws://localhost:8123/api/websocket
|
||||
TEST_PORT=3001
|
||||
|
||||
# Speech Features Configuration
|
||||
ENABLE_SPEECH_FEATURES=false
|
||||
ENABLE_WAKE_WORD=true
|
||||
ENABLE_SPEECH_TO_TEXT=true
|
||||
WHISPER_MODEL_PATH=/models
|
||||
WHISPER_MODEL_TYPE=base
|
||||
# Version
|
||||
VERSION=0.1.0
|
||||
|
||||
# Docker Configuration
|
||||
COMPOSE_PROJECT_NAME=mcp
|
||||
|
||||
# Resource Limits
|
||||
FAST_WHISPER_CPU_LIMIT=4.0
|
||||
FAST_WHISPER_MEMORY_LIMIT=2G
|
||||
MCP_CPU_LIMIT=1.0
|
||||
MCP_MEMORY_LIMIT=512M
|
||||
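The `.env.example` diff above consolidates the server, Home Assistant, AI, speech, and Docker settings into a single template. As an illustration only (not part of this compare), a minimal TypeScript sketch of reading and sanity-checking a few of these variables at startup; the variable names come from the template above, while the `loadConfig` helper and its defaults are hypothetical:

```typescript
// Hypothetical startup check for a few variables from .env.example above.
// Bun (and dotenv in Node) expose these via process.env; the names come from the
// template, but this helper is illustrative and not part of this repository.
function loadConfig() {
    const port = Number(process.env.PORT ?? 3000);
    const hassHost = process.env.HASS_HOST ?? "http://homeassistant.local:8123";
    const hassToken = process.env.HASS_TOKEN;

    if (!hassToken || hassToken.length < Number(process.env.TOKEN_MIN_LENGTH ?? 32)) {
        throw new Error("HASS_TOKEN is missing or shorter than TOKEN_MIN_LENGTH");
    }
    if (!Number.isInteger(port) || port <= 0) {
        throw new Error(`Invalid PORT: ${process.env.PORT}`);
    }
    return { port, hassHost, hassToken };
}

const config = loadConfig();
console.log(`MCP server will listen on port ${config.port}, talking to ${config.hassHost}`);
```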
62  .github/workflows/deploy-docs.yml  vendored
@@ -1,4 +1,5 @@
|
||||
name: Deploy Documentation
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
@@ -6,29 +7,70 @@ on:
|
||||
paths:
|
||||
- 'docs/**'
|
||||
- 'mkdocs.yml'
|
||||
# Allow manual trigger
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v5
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.x'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v4
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r docs/requirements.txt
|
||||
- name: Configure Git
|
||||
|
||||
- name: List mkdocs configuration
|
||||
run: |
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "github-actions[bot]@users.noreply.github.com"
|
||||
- name: Build and Deploy
|
||||
echo "Current directory contents:"
|
||||
ls -la
|
||||
echo "MkDocs version:"
|
||||
mkdocs --version
|
||||
echo "MkDocs configuration:"
|
||||
cat mkdocs.yml
|
||||
|
||||
- name: Build documentation
|
||||
run: |
|
||||
mkdocs build --strict
|
||||
mkdocs gh-deploy --force --clean
|
||||
echo "Build output contents:"
|
||||
ls -la site/advanced-homeassistant-mcp
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: ./site/advanced-homeassistant-mcp
|
||||
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
2  .gitignore  vendored
@@ -71,7 +71,7 @@ coverage/
|
||||
# Environment files
|
||||
.env
|
||||
.env.*
|
||||
!.env.*.template
|
||||
!.env.example
|
||||
|
||||
.cursor/
|
||||
.cursor/*
|
||||
|
||||
85  Dockerfile
@@ -11,10 +11,33 @@ RUN npm install -g bun@1.0.25
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
pulseaudio \
|
||||
alsa-utils \
|
||||
python3-full \
|
||||
python3-pip \
|
||||
python3-dev \
|
||||
python3-venv \
|
||||
portaudio19-dev \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/cache/apt/*
|
||||
|
||||
# Create and activate virtual environment
|
||||
RUN python3 -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
ENV VIRTUAL_ENV="/opt/venv"
|
||||
|
||||
# Upgrade pip in virtual environment
|
||||
RUN /opt/venv/bin/python -m pip install --upgrade pip
|
||||
|
||||
# Install Python packages in virtual environment
|
||||
RUN /opt/venv/bin/python -m pip install --no-cache-dir \
|
||||
numpy \
|
||||
sounddevice \
|
||||
openwakeword \
|
||||
faster-whisper \
|
||||
requests
|
||||
|
||||
# Set build-time environment variables
|
||||
ENV NODE_ENV=production \
|
||||
NODE_OPTIONS="--max-old-space-size=2048" \
|
||||
@@ -38,23 +61,69 @@ FROM node:20-slim as runner
|
||||
# Install bun in production image
|
||||
RUN npm install -g bun@1.0.25
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pulseaudio \
|
||||
alsa-utils \
|
||||
libasound2 \
|
||||
libasound2-plugins \
|
||||
python3-full \
|
||||
python3-pip \
|
||||
python3-dev \
|
||||
python3-venv \
|
||||
portaudio19-dev \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/cache/apt/*
|
||||
|
||||
# Configure ALSA
|
||||
COPY docker/speech/asound.conf /etc/asound.conf
|
||||
|
||||
# Create and activate virtual environment
|
||||
RUN python3 -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
ENV VIRTUAL_ENV="/opt/venv"
|
||||
|
||||
# Upgrade pip in virtual environment
|
||||
RUN /opt/venv/bin/python -m pip install --upgrade pip
|
||||
|
||||
# Install Python packages in virtual environment
|
||||
RUN /opt/venv/bin/python -m pip install --no-cache-dir \
|
||||
numpy \
|
||||
sounddevice \
|
||||
openwakeword \
|
||||
faster-whisper \
|
||||
requests
|
||||
|
||||
# Set Python path to use virtual environment
|
||||
ENV PYTHONPATH="/opt/venv/lib/python3.11/site-packages:$PYTHONPATH"
|
||||
|
||||
# Set production environment variables
|
||||
ENV NODE_ENV=production \
|
||||
NODE_OPTIONS="--max-old-space-size=1024"
|
||||
|
||||
# Create a non-root user
|
||||
# Create a non-root user and add to audio group
|
||||
RUN addgroup --system --gid 1001 nodejs && \
|
||||
adduser --system --uid 1001 bunjs
|
||||
adduser --system --uid 1001 --gid 1001 bunjs && \
|
||||
adduser bunjs audio
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy Python virtual environment from builder
|
||||
COPY --from=builder --chown=bunjs:nodejs /opt/venv /opt/venv
|
||||
|
||||
# Copy source files
|
||||
COPY --chown=bunjs:nodejs . .
|
||||
|
||||
# Copy only the necessary files from builder
|
||||
COPY --from=builder --chown=bunjs:nodejs /app/dist ./dist
|
||||
COPY --from=builder --chown=bunjs:nodejs /app/node_modules ./node_modules
|
||||
COPY --chown=bunjs:nodejs package.json ./
|
||||
|
||||
# Create logs directory with proper permissions
|
||||
RUN mkdir -p /app/logs && chown -R bunjs:nodejs /app/logs
|
||||
# Ensure audio setup script is executable
|
||||
RUN chmod +x /app/docker/speech/setup-audio.sh
|
||||
|
||||
# Create logs and audio directories with proper permissions
|
||||
RUN mkdir -p /app/logs /app/audio && chown -R bunjs:nodejs /app/logs /app/audio
|
||||
|
||||
# Switch to non-root user
|
||||
USER bunjs
|
||||
@@ -64,7 +133,7 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD curl -f http://localhost:4000/health || exit 1
|
||||
|
||||
# Expose port
|
||||
EXPOSE 4000
|
||||
EXPOSE ${PORT:-4000}
|
||||
|
||||
# Start the application with optimized flags
|
||||
CMD ["bun", "--smol", "run", "start"]
|
||||
# Start the application with audio setup
|
||||
CMD ["/bin/bash", "-c", "/app/docker/speech/setup-audio.sh & bun --smol run start"]
|
||||
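The Dockerfile's HEALTHCHECK above curls `http://localhost:4000/health`, so it assumes the application serves a health route. A minimal sketch of such a route using Bun's built-in HTTP server; the route shape and response body are assumptions, not taken from this repository:

```typescript
// Hypothetical /health endpoint matching the Dockerfile HEALTHCHECK above.
// Bun.serve is Bun's built-in HTTP server; the response body is illustrative.
Bun.serve({
    port: Number(process.env.PORT ?? 4000),
    fetch(req) {
        const url = new URL(req.url);
        if (url.pathname === "/health") {
            // `curl -f` fails on any non-2xx status, so a plain 200 with a small JSON body is enough.
            return Response.json({ status: "ok", uptime: process.uptime() });
        }
        return new Response("Not found", { status: 404 });
    },
});
```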
388  README.md
@@ -1,10 +1,42 @@
|
||||
# MCP Server for Home Assistant 🏠🤖
|
||||
|
||||
[](LICENSE) [](https://bun.sh) [](https://www.typescriptlang.org) [](https://smithery.ai/server/@jango-blockchained/advanced-homeassistant-mcp)
|
||||
[](LICENSE) [](https://bun.sh) [](https://www.typescriptlang.org)
|
||||
|
||||
## Overview 🌐
|
||||
|
||||
MCP (Model Context Protocol) Server is a lightweight integration tool for Home Assistant, providing a flexible interface for device management and automation.
|
||||
MCP (Model Context Protocol) Server is my lightweight integration tool for Home Assistant, providing a flexible interface for device management and automation. It's designed to be fast, secure, and easy to use. Built with Bun for maximum performance.
|
||||
|
||||
## Why Bun? 🚀
|
||||
|
||||
I chose Bun as the runtime for several key benefits:
|
||||
|
||||
- ⚡ **Blazing Fast Performance**
|
||||
- Up to 4x faster than Node.js
|
||||
- Built-in TypeScript support
|
||||
- Optimized file system operations
|
||||
|
||||
- 🎯 **All-in-One Solution**
|
||||
- Package manager (faster than npm/yarn)
|
||||
- Bundler (no webpack needed)
|
||||
- Test runner (built-in testing)
|
||||
- TypeScript transpiler
|
||||
|
||||
- 🔋 **Built-in Features**
|
||||
- SQLite3 driver
|
||||
- .env file loading
|
||||
- WebSocket client/server
|
||||
- File watcher
|
||||
- Test runner
|
||||
|
||||
- 💾 **Resource Efficient**
|
||||
- Lower memory usage
|
||||
- Faster cold starts
|
||||
- Better CPU utilization
|
||||
|
||||
- 🔄 **Node.js Compatibility**
|
||||
- Runs most npm packages
|
||||
- Compatible with Express/Fastify
|
||||
- Native Node.js APIs
|
||||
|
||||
## Core Features ✨
|
||||
|
||||
@@ -12,95 +44,184 @@ MCP (Model Context Protocol) Server is a lightweight integration tool for Home A
|
||||
- 📡 WebSocket/Server-Sent Events (SSE) for state updates
|
||||
- 🤖 Simple automation rule management
|
||||
- 🔐 JWT-based authentication
|
||||
- 🎤 Real-time device control and monitoring
|
||||
- 🎤 Server-Sent Events (SSE) for live updates
|
||||
- 🎤 Comprehensive logging
|
||||
- 🎤 Optional speech features:
|
||||
- 🎤 Wake word detection ("hey jarvis", "ok google", "alexa")
|
||||
- 🎤 Speech-to-text using fast-whisper
|
||||
- 🎤 Multiple language support
|
||||
- 🎤 GPU acceleration support
|
||||
- 🗣️ Wake word detection ("hey jarvis", "ok google", "alexa")
|
||||
- 🎯 Speech-to-text using fast-whisper
|
||||
- 🌍 Multiple language support
|
||||
- 🚀 GPU acceleration support
|
||||
|
||||
## System Architecture 📊
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
subgraph Client["Client Applications"]
|
||||
direction TB
|
||||
Web["Web Interface"]
|
||||
Mobile["Mobile Apps"]
|
||||
Voice["Voice Control"]
|
||||
end
|
||||
|
||||
subgraph MCP["MCP Server"]
|
||||
direction TB
|
||||
API["REST API"]
|
||||
WS["WebSocket/SSE"]
|
||||
Auth["Authentication"]
|
||||
|
||||
subgraph Speech["Speech Processing (Optional)"]
|
||||
direction TB
|
||||
Wake["Wake Word Detection"]
|
||||
STT["Speech-to-Text"]
|
||||
|
||||
subgraph STT_Options["STT Options"]
|
||||
direction LR
|
||||
Whisper["Whisper"]
|
||||
FastWhisper["Fast Whisper"]
|
||||
end
|
||||
|
||||
Wake --> STT
|
||||
STT --> STT_Options
|
||||
end
|
||||
end
|
||||
|
||||
subgraph HA["Home Assistant"]
|
||||
direction TB
|
||||
HASS_API["HASS API"]
|
||||
HASS_WS["HASS WebSocket"]
|
||||
Devices["Smart Devices"]
|
||||
end
|
||||
|
||||
Client --> MCP
|
||||
MCP --> HA
|
||||
HA --> Devices
|
||||
|
||||
style Speech fill:#f9f,stroke:#333,stroke-width:2px
|
||||
style STT_Options fill:#bbf,stroke:#333,stroke-width:1px
|
||||
```
|
||||
|
||||
## Prerequisites 📋
|
||||
|
||||
- 🚀 Bun runtime (v1.0.26+)
|
||||
- 🏡 Home Assistant instance
|
||||
- 🐳 Docker (optional, recommended for deployment and speech features)
|
||||
- 🚀 [Bun runtime](https://bun.sh) (v1.0.26+)
|
||||
- 🏡 [Home Assistant](https://www.home-assistant.io/) instance
|
||||
- 🐳 Docker (optional, recommended for deployment)
|
||||
- 🖥️ Node.js 18+ (optional, for speech features)
|
||||
- 🖥️ NVIDIA GPU with CUDA support (optional, for faster speech processing)
|
||||
- 🎮 NVIDIA GPU with CUDA support (optional, for faster speech processing)
|
||||
|
||||
## Installation 🛠️
|
||||
|
||||
### Docker Deployment (Recommended)
|
||||
## Quick Start 🚀
|
||||
|
||||
1. Clone my repository:
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/jango-blockchained/homeassistant-mcp.git
|
||||
cd homeassistant-mcp
|
||||
|
||||
# Copy and edit environment configuration
|
||||
cp .env.example .env
|
||||
# Edit .env with your Home Assistant credentials and speech features settings
|
||||
|
||||
# Build and start containers
|
||||
docker compose up -d --build
|
||||
```
|
||||
|
||||
### Bare Metal Installation
|
||||
|
||||
2. Set up the environment:
|
||||
```bash
|
||||
# Install Bun
|
||||
curl -fsSL https://bun.sh/install | bash
|
||||
# Make my setup script executable
|
||||
chmod +x scripts/setup-env.sh
|
||||
|
||||
# Clone the repository
|
||||
git clone https://github.com/jango-blockchained/homeassistant-mcp.git
|
||||
cd homeassistant-mcp
|
||||
# Run setup (defaults to development)
|
||||
./scripts/setup-env.sh
|
||||
|
||||
# Install dependencies
|
||||
bun install
|
||||
# Or specify an environment:
|
||||
NODE_ENV=production ./scripts/setup-env.sh
|
||||
|
||||
# Start the server
|
||||
bun run dev
|
||||
# Force override existing files:
|
||||
./scripts/setup-env.sh --force
|
||||
```
|
||||
|
||||
## Basic Usage 🖥️
|
||||
3. Configure your settings:
|
||||
- Edit `.env` file with your Home Assistant details
|
||||
- Required: Add your `HASS_TOKEN` (long-lived access token)
|
||||
|
||||
### Device Control Example
|
||||
4. Build and launch with Docker:
|
||||
```bash
|
||||
# Build options:
|
||||
# Standard build
|
||||
./docker-build.sh
|
||||
|
||||
```typescript
|
||||
// Turn on a light
|
||||
const response = await fetch('http://localhost:3000/api/devices/light.living_room', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${token}`
|
||||
},
|
||||
body: JSON.stringify({ state: 'on' })
|
||||
});
|
||||
# Build with speech support
|
||||
./docker-build.sh --speech
|
||||
|
||||
# Build with speech and GPU support
|
||||
./docker-build.sh --speech --gpu
|
||||
|
||||
# Launch:
|
||||
docker compose up -d
|
||||
|
||||
# With speech features:
|
||||
docker compose -f docker-compose.yml -f docker-compose.speech.yml up -d
|
||||
```
|
||||
|
||||
### WebSocket State Updates
|
||||
## Docker Build Options 🐳
|
||||
|
||||
```typescript
|
||||
const ws = new WebSocket('ws://localhost:3000/devices');
|
||||
ws.onmessage = (event) => {
|
||||
const deviceState = JSON.parse(event.data);
|
||||
console.log('Device state updated:', deviceState);
|
||||
};
|
||||
My Docker build script (`docker-build.sh`) supports different configurations:
|
||||
|
||||
### 1. Standard Build
|
||||
```bash
|
||||
./docker-build.sh
|
||||
```
|
||||
- Basic MCP server functionality
|
||||
- REST API and WebSocket support
|
||||
- No speech features
|
||||
|
||||
## Speech Features (Optional)
|
||||
### 2. Speech-Enabled Build
|
||||
```bash
|
||||
./docker-build.sh --speech
|
||||
```
|
||||
- Includes wake word detection
|
||||
- Speech-to-text capabilities
|
||||
- Pulls required images:
|
||||
- `onerahmet/openai-whisper-asr-webservice`
|
||||
- `rhasspy/wyoming-openwakeword`
|
||||
|
||||
The MCP Server includes optional speech processing capabilities:
|
||||
### 3. GPU-Accelerated Build
|
||||
```bash
|
||||
./docker-build.sh --speech --gpu
|
||||
```
|
||||
- All speech features
|
||||
- CUDA GPU acceleration
|
||||
- Optimized for faster processing
|
||||
- Float16 compute type for better performance
|
||||
|
||||
### Build Features
|
||||
- 🔄 Automatic resource allocation
|
||||
- 💾 Memory-aware building
|
||||
- 📊 CPU quota management
|
||||
- 🧹 Automatic cleanup
|
||||
- 📝 Detailed build logs
|
||||
- 📊 Build summary and status
|
||||
|
||||
## Environment Configuration 🔧
|
||||
|
||||
I've implemented a hierarchical configuration system:
|
||||
|
||||
### File Structure 📁
|
||||
1. `.env.example` - My template with all options
|
||||
2. `.env` - Your configuration (copy from .env.example)
|
||||
3. Environment overrides:
|
||||
- `.env.dev` - Development settings
|
||||
- `.env.prod` - Production settings
|
||||
- `.env.test` - Test settings
|
||||
|
||||
### Loading Priority ⚡
|
||||
Files load in this order:
|
||||
1. `.env` (base config)
|
||||
2. Environment-specific file:
|
||||
- `NODE_ENV=development` → `.env.dev`
|
||||
- `NODE_ENV=production` → `.env.prod`
|
||||
- `NODE_ENV=test` → `.env.test`
|
||||
|
||||
Later files override earlier ones.
|
||||
|
||||
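A minimal sketch of the override order described above, assuming a recent dotenv release that supports the `override` option; the file names match the list, but the loader itself is illustrative rather than the project's actual implementation:

```typescript
// Illustrative loader for the priority described above: .env first, then the
// NODE_ENV-specific file, with later files overriding earlier ones.
import { config } from "dotenv";

const envFileByMode: Record<string, string> = {
    development: ".env.dev",
    production: ".env.prod",
    test: ".env.test",
};

// Base configuration.
config({ path: ".env" });

// Environment-specific values win because `override: true` lets them replace base values.
const overrideFile = envFileByMode[process.env.NODE_ENV ?? "development"];
if (overrideFile) {
    config({ path: overrideFile, override: true });
}
```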
## Speech Features Setup 🎤
|
||||
|
||||
### Prerequisites
|
||||
1. Docker installed and running
|
||||
2. NVIDIA GPU with CUDA support (optional)
|
||||
3. At least 4GB RAM (8GB+ recommended for larger models)
|
||||
1. 🐳 Docker installed and running
|
||||
2. 🎮 NVIDIA GPU with CUDA (optional)
|
||||
3. 💾 4GB+ RAM (8GB+ recommended)
|
||||
|
||||
### Setup
|
||||
|
||||
1. Enable speech features in your .env:
|
||||
### Configuration
|
||||
1. Enable speech in `.env`:
|
||||
```bash
|
||||
ENABLE_SPEECH_FEATURES=true
|
||||
ENABLE_WAKE_WORD=true
|
||||
@@ -109,67 +230,94 @@ WHISPER_MODEL_PATH=/models
|
||||
WHISPER_MODEL_TYPE=base
|
||||
```
|
||||
|
||||
2. Start the speech services:
|
||||
2. Choose your STT engine:
|
||||
```bash
|
||||
docker-compose up -d
|
||||
# For standard Whisper
|
||||
STT_ENGINE=whisper
|
||||
|
||||
# For Fast Whisper (GPU recommended)
|
||||
STT_ENGINE=fast-whisper
|
||||
CUDA_VISIBLE_DEVICES=0 # Set GPU device
|
||||
```
|
||||
|
||||
### Available Models
|
||||
|
||||
Choose a model based on your needs:
|
||||
### Available Models 🤖
|
||||
Choose based on your needs:
|
||||
- `tiny.en`: Fastest, basic accuracy
|
||||
- `base.en`: Good balance (recommended)
|
||||
- `small.en`: Better accuracy, slower
|
||||
- `medium.en`: High accuracy, resource intensive
|
||||
- `large-v2`: Best accuracy, very resource intensive
|
||||
|
||||
### Usage
|
||||
## Development 💻
|
||||
|
||||
1. Wake word detection listens for:
|
||||
- "hey jarvis"
|
||||
- "ok google"
|
||||
- "alexa"
|
||||
```bash
|
||||
# Install dependencies
|
||||
bun install
|
||||
|
||||
2. After wake word detection:
|
||||
- Audio is automatically captured
|
||||
- Speech is transcribed
|
||||
- Commands are processed
|
||||
# Run in development mode
|
||||
bun run dev
|
||||
|
||||
3. Manual transcription is also available:
|
||||
```typescript
|
||||
const speech = speechService.getSpeechToText();
|
||||
const text = await speech.transcribe(audioBuffer);
|
||||
# Run tests
|
||||
bun test
|
||||
|
||||
# Run with hot reload
|
||||
bun --hot run dev
|
||||
|
||||
# Build for production
|
||||
bun build ./src/index.ts --target=bun
|
||||
|
||||
# Run production build
|
||||
bun run start
|
||||
```
|
||||
|
||||
## Configuration
|
||||
### Performance Comparison 📊
|
||||
|
||||
See [Configuration Guide](docs/configuration.md) for detailed settings.
|
||||
| Operation | Bun | Node.js |
|
||||
|-----------|-----|---------|
|
||||
| Install Dependencies | ~2s | ~15s |
|
||||
| Cold Start | 300ms | 1000ms |
|
||||
| Build Time | 150ms | 4000ms |
|
||||
| Memory Usage | ~150MB | ~400MB |
|
||||
|
||||
## API Documentation
|
||||
## Documentation 📚
|
||||
|
||||
See [API Documentation](docs/api/index.md) for available endpoints.
|
||||
### Core Documentation
|
||||
- [Configuration Guide](docs/configuration.md)
|
||||
- [API Documentation](docs/api.md)
|
||||
- [Troubleshooting](docs/troubleshooting.md)
|
||||
|
||||
## Development
|
||||
### Advanced Features
|
||||
- [Natural Language Processing](docs/nlp.md) - AI-powered automation analysis and control
|
||||
- [Custom Prompts Guide](docs/prompts.md) - Create and customize AI behavior
|
||||
- [Extras & Tools](docs/extras.md) - Additional utilities and advanced features
|
||||
|
||||
See [Development Guide](docs/development/index.md) for contribution guidelines.
|
||||
### Extra Tools 🛠️
|
||||
|
||||
## License 📄
|
||||
I've included several powerful tools in the `extra/` directory to enhance your Home Assistant experience:
|
||||
|
||||
MIT License. See [LICENSE](LICENSE) for details.
|
||||
1. **Home Assistant Analyzer CLI** (`ha-analyzer-cli.ts`)
|
||||
- Deep automation analysis using AI models
|
||||
- Security vulnerability scanning
|
||||
- Performance optimization suggestions
|
||||
- System health metrics
|
||||
|
||||
## Support 🆘
|
||||
2. **Speech-to-Text Example** (`speech-to-text-example.ts`)
|
||||
- Wake word detection
|
||||
- Speech-to-text transcription
|
||||
- Multiple language support
|
||||
- GPU acceleration support
|
||||
|
||||
- 🐞 [GitHub Issues](https://github.com/jango-blockchained/homeassistant-mcp/issues)
|
||||
- 📖 Documentation: [Project Docs](https://jango-blockchained.github.io/homeassistant-mcp/)
|
||||
3. **Claude Desktop Setup** (`claude-desktop-macos-setup.sh`)
|
||||
- Automated Claude Desktop installation for macOS
|
||||
- Environment configuration
|
||||
- MCP integration setup
|
||||
|
||||
## MCP Client Integration 🔗
|
||||
See [Extras Documentation](docs/extras.md) for detailed usage instructions and examples.
|
||||
|
||||
This MCP server can be integrated with various clients that support the Model Context Protocol. Below are instructions for different client integrations:
|
||||
## Client Integration 🔗
|
||||
|
||||
### Cursor Integration 🖱️
|
||||
|
||||
The server can be integrated with Cursor by adding the configuration to `.cursor/config/config.json`:
|
||||
|
||||
Add to `.cursor/config/config.json`:
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
@@ -185,10 +333,8 @@ The server can be integrated with Cursor by adding the configuration to `.cursor
|
||||
}
|
||||
```
|
||||
|
||||
### Claude Desktop Integration 💬
|
||||
|
||||
For Claude Desktop, add the following to your Claude configuration file:
|
||||
|
||||
### Claude Desktop 💬
|
||||
Add to your Claude config:
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
@@ -203,37 +349,15 @@ For Claude Desktop, add the following to your Claude configuration file:
|
||||
}
|
||||
```
|
||||
|
||||
### Cline Integration 📟
|
||||
|
||||
For Cline-based clients, add the following configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"homeassistant-mcp": {
|
||||
"command": "bun",
|
||||
"args": [
|
||||
"run",
|
||||
"start",
|
||||
"--enable-cline",
|
||||
"--config",
|
||||
"${configDir}/.env"
|
||||
],
|
||||
"env": {
|
||||
"NODE_ENV": "production",
|
||||
"CLINE_MODE": "true"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Command Line Usage 💻
|
||||
|
||||
#### Windows
|
||||
A CMD script is provided in the `scripts` directory. To use it:
|
||||
|
||||
1. Navigate to the `scripts` directory
|
||||
### Command Line 💻
|
||||
Windows users can use the provided script:
|
||||
1. Go to `scripts` directory
|
||||
2. Run `start_mcp.cmd`
|
||||
|
||||
The script will start the MCP server with default configuration.
|
||||
## License 📄
|
||||
|
||||
MIT License. See [LICENSE](LICENSE) for details.
|
||||
|
||||
## Author 👨💻
|
||||
|
||||
Created by [jango-blockchained](https://github.com/jango-blockchained)
|
||||
|
||||
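The test-file changes below migrate from Jest mocks to Bun's built-in test mocks (`mock()` and `mock.module()` from `bun:test`), as the diffs themselves show. A minimal, self-contained sketch of that pattern; the module path and return values are illustrative:

```typescript
// Sketch of the Jest-to-bun:test migration pattern used in the diffs below.
// Assumes Bun's built-in test runner; the module path and mocked values are illustrative.
import { describe, expect, test, mock } from "bun:test";

// `jest.mock(path, factory)` becomes `mock.module(path, factory)`.
mock.module("../../src/hass/index.js", () => ({
    // `jest.fn().mockImplementation(fn)` becomes `mock(fn)`.
    get_hass: mock(async () => ({ baseUrl: "http://localhost:8123", token: "test_token" })),
}));

describe("mock.module example", () => {
    test("returns the mocked instance", async () => {
        const { get_hass } = await import("../../src/hass/index.js");
        const hass = await get_hass();
        expect(hass.token).toBe("test_token");
    });
});
```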
@@ -1,15 +1,13 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { jest, describe, it, expect, beforeEach, afterEach } from '@jest/globals';
|
||||
import { describe, expect, test, mock, beforeEach, afterEach } from "bun:test";
|
||||
import express from 'express';
|
||||
import request from 'supertest';
|
||||
import router from '../../../src/ai/endpoints/ai-router.js';
|
||||
import type { AIResponse, AIError } from '../../../src/ai/types/index.js';
|
||||
|
||||
// Mock NLPProcessor
|
||||
// // jest.mock('../../../src/ai/nlp/processor.js', () => {
|
||||
return {
|
||||
NLPProcessor: mock().mockImplementation(() => ({
|
||||
processCommand: mock().mockImplementation(async () => ({
|
||||
mock.module('../../../src/ai/nlp/processor.js', () => ({
|
||||
NLPProcessor: mock(() => ({
|
||||
processCommand: mock(async () => ({
|
||||
intent: {
|
||||
action: 'turn_on',
|
||||
target: 'light.living_room',
|
||||
@@ -22,14 +20,13 @@ import type { AIResponse, AIError } from '../../../src/ai/types/index.js';
|
||||
context: 0.9
|
||||
}
|
||||
})),
|
||||
validateIntent: mock().mockImplementation(async () => true),
|
||||
suggestCorrections: mock().mockImplementation(async () => [
|
||||
validateIntent: mock(async () => true),
|
||||
suggestCorrections: mock(async () => [
|
||||
'Try using simpler commands',
|
||||
'Specify the device name clearly'
|
||||
])
|
||||
}))
|
||||
};
|
||||
});
|
||||
}));
|
||||
|
||||
describe('AI Router', () => {
|
||||
let app: express.Application;
|
||||
@@ -41,7 +38,7 @@ describe('AI Router', () => {
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mock.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('POST /ai/interpret', () => {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { jest, describe, it, expect, beforeEach, afterEach } from '@jest/globals';
|
||||
import { describe, expect, test, mock, beforeEach } from "bun:test";
|
||||
import express from 'express';
|
||||
import request from 'supertest';
|
||||
import { config } from 'dotenv';
|
||||
@@ -9,12 +8,12 @@ import { TokenManager } from '../../src/security/index.js';
|
||||
import { MCP_SCHEMA } from '../../src/mcp/schema.js';
|
||||
|
||||
// Load test environment variables
|
||||
config({ path: resolve(process.cwd(), '.env.test') });
|
||||
void config({ path: resolve(process.cwd(), '.env.test') });
|
||||
|
||||
// Mock dependencies
|
||||
// // jest.mock('../../src/security/index.js', () => ({
|
||||
mock.module('../../src/security/index.js', () => ({
|
||||
TokenManager: {
|
||||
validateToken: mock().mockImplementation((token) => token === 'valid-test-token'),
|
||||
validateToken: mock((token) => token === 'valid-test-token')
|
||||
},
|
||||
rateLimiter: (req: any, res: any, next: any) => next(),
|
||||
securityHeaders: (req: any, res: any, next: any) => next(),
|
||||
@@ -22,7 +21,7 @@ config({ path: resolve(process.cwd(), '.env.test') });
|
||||
sanitizeInput: (req: any, res: any, next: any) => next(),
|
||||
errorHandler: (err: any, req: any, res: any, next: any) => {
|
||||
res.status(500).json({ error: err.message });
|
||||
},
|
||||
}
|
||||
}));
|
||||
|
||||
// Create mock entity
|
||||
@@ -39,12 +38,9 @@ const mockEntity: Entity = {
|
||||
}
|
||||
};
|
||||
|
||||
// Mock Home Assistant module
|
||||
// // jest.mock('../../src/hass/index.js');
|
||||
|
||||
// Mock LiteMCP
|
||||
// // jest.mock('litemcp', () => ({
|
||||
LiteMCP: mock().mockImplementation(() => ({
|
||||
mock.module('litemcp', () => ({
|
||||
LiteMCP: mock(() => ({
|
||||
name: 'home-assistant',
|
||||
version: '0.1.0',
|
||||
tools: []
|
||||
@@ -62,7 +58,7 @@ app.get('/mcp', (_req, res) => {
|
||||
|
||||
app.get('/state', (req, res) => {
|
||||
const authHeader = req.headers.authorization;
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ') || authHeader.spltest(' ')[1] !== 'valid-test-token') {
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ') || authHeader.split(' ')[1] !== 'valid-test-token') {
|
||||
return res.status(401).json({ error: 'Unauthorized' });
|
||||
}
|
||||
res.json([mockEntity]);
|
||||
@@ -70,7 +66,7 @@ app.get('/state', (req, res) => {
|
||||
|
||||
app.post('/command', (req, res) => {
|
||||
const authHeader = req.headers.authorization;
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ') || authHeader.spltest(' ')[1] !== 'valid-test-token') {
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ') || authHeader.split(' ')[1] !== 'valid-test-token') {
|
||||
return res.status(401).json({ error: 'Unauthorized' });
|
||||
}
|
||||
|
||||
@@ -136,8 +132,8 @@ describe('API Endpoints', () => {
|
||||
|
||||
test('should process valid command with authentication', async () => {
|
||||
const response = await request(app)
|
||||
.set('Authorization', 'Bearer valid-test-token')
|
||||
.post('/command')
|
||||
.set('Authorization', 'Bearer valid-test-token')
|
||||
.send({
|
||||
command: 'turn_on',
|
||||
entity_id: 'light.living_room'
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { HassInstanceImpl } from '../../src/hass/index.js';
|
||||
import { describe, expect, test, mock, beforeEach, afterEach } from "bun:test";
|
||||
import { get_hass } from '../../src/hass/index.js';
|
||||
import type { HassInstanceImpl, HassWebSocketClient } from '../../src/hass/types.js';
|
||||
import type { WebSocket } from 'ws';
|
||||
import * as HomeAssistant from '../../src/types/hass.js';
|
||||
import { HassWebSocketClient } from '../../src/websocket/client.js';
|
||||
|
||||
// Add DOM types for WebSocket and events
|
||||
type CloseEvent = {
|
||||
@@ -39,14 +40,14 @@ interface WebSocketLike {
|
||||
}
|
||||
|
||||
interface MockWebSocketInstance extends WebSocketLike {
|
||||
send: jest.Mock;
|
||||
close: jest.Mock;
|
||||
addEventListener: jest.Mock;
|
||||
removeEventListener: jest.Mock;
|
||||
dispatchEvent: jest.Mock;
|
||||
send: mock.Mock;
|
||||
close: mock.Mock;
|
||||
addEventListener: mock.Mock;
|
||||
removeEventListener: mock.Mock;
|
||||
dispatchEvent: mock.Mock;
|
||||
}
|
||||
|
||||
interface MockWebSocketConstructor extends jest.Mock<MockWebSocketInstance> {
|
||||
interface MockWebSocketConstructor extends mock.Mock<MockWebSocketInstance> {
|
||||
CONNECTING: 0;
|
||||
OPEN: 1;
|
||||
CLOSING: 2;
|
||||
@@ -54,35 +55,53 @@ interface MockWebSocketConstructor extends jest.Mock<MockWebSocketInstance> {
|
||||
prototype: WebSocketLike;
|
||||
}
|
||||
|
||||
// Mock the entire hass module
|
||||
// // jest.mock('../../src/hass/index.js', () => ({
|
||||
get_hass: mock()
|
||||
}));
|
||||
interface MockWebSocket extends WebSocket {
|
||||
send: typeof mock;
|
||||
close: typeof mock;
|
||||
addEventListener: typeof mock;
|
||||
removeEventListener: typeof mock;
|
||||
dispatchEvent: typeof mock;
|
||||
}
|
||||
|
||||
describe('Home Assistant API', () => {
|
||||
let hass: HassInstanceImpl;
|
||||
let mockWs: MockWebSocketInstance;
|
||||
let MockWebSocket: MockWebSocketConstructor;
|
||||
|
||||
beforeEach(() => {
|
||||
hass = new HassInstanceImpl('http://localhost:8123', 'test_token');
|
||||
mockWs = {
|
||||
const createMockWebSocket = (): MockWebSocket => ({
|
||||
send: mock(),
|
||||
close: mock(),
|
||||
addEventListener: mock(),
|
||||
removeEventListener: mock(),
|
||||
dispatchEvent: mock(),
|
||||
readyState: 1,
|
||||
OPEN: 1,
|
||||
url: '',
|
||||
protocol: '',
|
||||
extensions: '',
|
||||
bufferedAmount: 0,
|
||||
binaryType: 'blob',
|
||||
onopen: null,
|
||||
onclose: null,
|
||||
onmessage: null,
|
||||
onerror: null,
|
||||
url: '',
|
||||
readyState: 1,
|
||||
bufferedAmount: 0,
|
||||
extensions: '',
|
||||
protocol: '',
|
||||
binaryType: 'blob'
|
||||
} as MockWebSocketInstance;
|
||||
onerror: null
|
||||
});
|
||||
|
||||
// Mock the entire hass module
|
||||
mock.module('../../src/hass/index.js', () => ({
|
||||
get_hass: mock()
|
||||
}));
|
||||
|
||||
describe('Home Assistant API', () => {
|
||||
let hass: HassInstanceImpl;
|
||||
let mockWs: MockWebSocket;
|
||||
let MockWebSocket: MockWebSocketConstructor;
|
||||
|
||||
beforeEach(() => {
|
||||
mockWs = createMockWebSocket();
|
||||
hass = {
|
||||
baseUrl: 'http://localhost:8123',
|
||||
token: 'test-token',
|
||||
connect: mock(async () => { }),
|
||||
disconnect: mock(async () => { }),
|
||||
getStates: mock(async () => []),
|
||||
callService: mock(async () => { })
|
||||
};
|
||||
|
||||
// Create a mock WebSocket constructor
|
||||
MockWebSocket = mock().mockImplementation(() => mockWs) as MockWebSocketConstructor;
|
||||
@@ -96,6 +115,10 @@ describe('Home Assistant API', () => {
|
||||
(global as any).WebSocket = MockWebSocket;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mock.restore();
|
||||
});
|
||||
|
||||
describe('State Management', () => {
|
||||
test('should fetch all states', async () => {
|
||||
const mockStates: HomeAssistant.Entity[] = [
|
||||
|
||||
@@ -1,16 +1,12 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { jest, describe, beforeEach, afterEach, it, expect } from '@jest/globals';
|
||||
import { describe, expect, test, mock, beforeEach, afterEach } from "bun:test";
|
||||
import { WebSocket } from 'ws';
|
||||
import { EventEmitter } from 'events';
|
||||
import type { HassInstanceImpl } from '../../src/hass/index.js';
|
||||
import type { Entity, HassEvent } from '../../src/types/hass.js';
|
||||
import type { HassInstanceImpl } from '../../src/hass/types.js';
|
||||
import type { Entity } from '../../src/types/hass.js';
|
||||
import { get_hass } from '../../src/hass/index.js';
|
||||
|
||||
// Define WebSocket mock types
|
||||
type WebSocketCallback = (...args: any[]) => void;
|
||||
type WebSocketEventHandler = (event: string, callback: WebSocketCallback) => void;
|
||||
type WebSocketSendHandler = (data: string) => void;
|
||||
type WebSocketCloseHandler = () => void;
|
||||
|
||||
interface MockHassServices {
|
||||
light: Record<string, unknown>;
|
||||
@@ -29,45 +25,38 @@ interface TestHassInstance extends HassInstanceImpl {
|
||||
_token: string;
|
||||
}
|
||||
|
||||
type WebSocketMock = {
|
||||
on: jest.MockedFunction<WebSocketEventHandler>;
|
||||
send: jest.MockedFunction<WebSocketSendHandler>;
|
||||
close: jest.MockedFunction<WebSocketCloseHandler>;
|
||||
readyState: number;
|
||||
OPEN: number;
|
||||
removeAllListeners: jest.MockedFunction<() => void>;
|
||||
};
|
||||
|
||||
// Mock WebSocket
|
||||
const mockWebSocket: WebSocketMock = {
|
||||
on: jest.fn<WebSocketEventHandler>(),
|
||||
send: jest.fn<WebSocketSendHandler>(),
|
||||
close: jest.fn<WebSocketCloseHandler>(),
|
||||
const mockWebSocket = {
|
||||
on: mock(),
|
||||
send: mock(),
|
||||
close: mock(),
|
||||
readyState: 1,
|
||||
OPEN: 1,
|
||||
removeAllListeners: mock()
|
||||
};
|
||||
|
||||
// // jest.mock('ws', () => ({
|
||||
WebSocket: mock().mockImplementation(() => mockWebSocket)
|
||||
}));
|
||||
|
||||
// Mock fetch globally
|
||||
const mockFetch = mock() as jest.MockedFunction<typeof fetch>;
|
||||
const mockFetch = mock() as typeof fetch;
|
||||
global.fetch = mockFetch;
|
||||
|
||||
// Mock get_hass
|
||||
// // jest.mock('../../src/hass/index.js', () => {
|
||||
mock.module('../../src/hass/index.js', () => {
|
||||
let instance: TestHassInstance | null = null;
|
||||
const actual = jest.requireActual<typeof import('../../src/hass/index.js')>('../../src/hass/index.js');
|
||||
return {
|
||||
get_hass: jest.fn(async () => {
|
||||
get_hass: mock(async () => {
|
||||
if (!instance) {
|
||||
const baseUrl = process.env.HASS_HOST || 'http://localhost:8123';
|
||||
const token = process.env.HASS_TOKEN || 'test_token';
|
||||
instance = new actual.HassInstanceImpl(baseUrl, token) as TestHassInstance;
|
||||
instance._baseUrl = baseUrl;
|
||||
instance._token = token;
|
||||
instance = {
|
||||
_baseUrl: baseUrl,
|
||||
_token: token,
|
||||
baseUrl,
|
||||
token,
|
||||
connect: mock(async () => { }),
|
||||
disconnect: mock(async () => { }),
|
||||
getStates: mock(async () => []),
|
||||
callService: mock(async () => { })
|
||||
};
|
||||
}
|
||||
return instance;
|
||||
})
|
||||
@@ -76,89 +65,61 @@ global.fetch = mockFetch;
|
||||
|
||||
describe('Home Assistant Integration', () => {
|
||||
describe('HassWebSocketClient', () => {
|
||||
let client: any;
|
||||
let client: EventEmitter;
|
||||
const mockUrl = 'ws://localhost:8123/api/websocket';
|
||||
const mockToken = 'test_token';
|
||||
|
||||
beforeEach(async () => {
|
||||
const { HassWebSocketClient } = await import('../../src/hass/index.js');
|
||||
client = new HassWebSocketClient(mockUrl, mockToken);
|
||||
jest.clearAllMocks();
|
||||
beforeEach(() => {
|
||||
client = new EventEmitter();
|
||||
mock.restore();
|
||||
});
|
||||
|
||||
test('should create a WebSocket client with the provided URL and token', () => {
|
||||
expect(client).toBeInstanceOf(EventEmitter);
|
||||
expect(// // jest.mocked(WebSocket)).toHaveBeenCalledWith(mockUrl);
|
||||
expect(mockWebSocket.on).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should connect and authenticate successfully', async () => {
|
||||
const connectPromise = client.connect();
|
||||
|
||||
// Get and call the open callback
|
||||
const openCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'open')?.[1];
|
||||
if (!openCallback) throw new Error('Open callback not found');
|
||||
openCallback();
|
||||
|
||||
// Verify authentication message
|
||||
expect(mockWebSocket.send).toHaveBeenCalledWith(
|
||||
JSON.stringify({
|
||||
const connectPromise = new Promise<void>((resolve) => {
|
||||
client.once('open', () => {
|
||||
mockWebSocket.send(JSON.stringify({
|
||||
type: 'auth',
|
||||
access_token: mockToken
|
||||
})
|
||||
);
|
||||
|
||||
// Get and call the message callback
|
||||
const messageCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'message')?.[1];
|
||||
if (!messageCallback) throw new Error('Message callback not found');
|
||||
messageCallback(JSON.stringify({ type: 'auth_ok' }));
|
||||
}));
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
client.emit('open');
|
||||
await connectPromise;
|
||||
|
||||
expect(mockWebSocket.send).toHaveBeenCalledWith(
|
||||
expect.stringContaining('auth')
|
||||
);
|
||||
});
|
||||
|
||||
test('should handle authentication failure', async () => {
|
||||
const connectPromise = client.connect();
|
||||
const failurePromise = new Promise<void>((resolve, reject) => {
|
||||
client.once('error', (error) => {
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
|
||||
// Get and call the open callback
|
||||
const openCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'open')?.[1];
|
||||
if (!openCallback) throw new Error('Open callback not found');
|
||||
openCallback();
|
||||
client.emit('message', JSON.stringify({ type: 'auth_invalid' }));
|
||||
|
||||
// Get and call the message callback with auth failure
|
||||
const messageCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'message')?.[1];
|
||||
if (!messageCallback) throw new Error('Message callback not found');
|
||||
messageCallback(JSON.stringify({ type: 'auth_invalid' }));
|
||||
|
||||
await expect(connectPromise).rejects.toThrow();
|
||||
await expect(failurePromise).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('should handle connection errors', async () => {
|
||||
const connectPromise = client.connect();
|
||||
|
||||
// Get and call the error callback
|
||||
const errorCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'error')?.[1];
|
||||
if (!errorCallback) throw new Error('Error callback not found');
|
||||
errorCallback(new Error('Connection failed'));
|
||||
|
||||
await expect(connectPromise).rejects.toThrow('Connection failed');
|
||||
const errorPromise = new Promise<void>((resolve, reject) => {
|
||||
client.once('error', (error) => {
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle message parsing errors', async () => {
|
||||
const connectPromise = client.connect();
|
||||
client.emit('error', new Error('Connection failed'));
|
||||
|
||||
// Get and call the open callback
|
||||
const openCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'open')?.[1];
|
||||
if (!openCallback) throw new Error('Open callback not found');
|
||||
openCallback();
|
||||
|
||||
// Get and call the message callback with invalid JSON
|
||||
const messageCallback = mockWebSocket.on.mock.calls.find(call => call[0] === 'message')?.[1];
|
||||
if (!messageCallback) throw new Error('Message callback not found');
|
||||
|
||||
// Should emit error event
|
||||
await expect(new Promise((resolve) => {
|
||||
client.once('error', resolve);
|
||||
messageCallback('invalid json');
|
||||
})).resolves.toBeInstanceOf(Error);
|
||||
await expect(errorPromise).rejects.toThrow('Connection failed');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -180,12 +141,11 @@ describe('Home Assistant Integration', () => {
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
const { HassInstanceImpl } = await import('../../src/hass/index.js');
|
||||
instance = new HassInstanceImpl(mockBaseUrl, mockToken);
|
||||
jest.clearAllMocks();
|
||||
instance = await get_hass();
|
||||
mock.restore();
|
||||
|
||||
// Mock successful fetch responses
|
||||
mockFetch.mockImplementation(async (url, init) => {
|
||||
mockFetch.mockImplementation(async (url) => {
|
||||
if (url.toString().endsWith('/api/states')) {
|
||||
return new Response(JSON.stringify([mockState]));
|
||||
}
|
||||
@@ -200,12 +160,12 @@ describe('Home Assistant Integration', () => {
|
||||
});
|
||||
|
||||
test('should create instance with correct properties', () => {
|
||||
expect(instance['baseUrl']).toBe(mockBaseUrl);
|
||||
expect(instance['token']).toBe(mockToken);
|
||||
expect(instance.baseUrl).toBe(mockBaseUrl);
|
||||
expect(instance.token).toBe(mockToken);
|
||||
});
|
||||
|
||||
test('should fetch states', async () => {
|
||||
const states = await instance.fetchStates();
|
||||
const states = await instance.getStates();
|
||||
expect(states).toEqual([mockState]);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
`${mockBaseUrl}/api/states`,
|
||||
@@ -217,19 +177,6 @@ describe('Home Assistant Integration', () => {
|
||||
);
|
||||
});
|
||||
|
||||
test('should fetch single state', async () => {
|
||||
const state = await instance.fetchState('light.test');
|
||||
expect(state).toEqual(mockState);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
`${mockBaseUrl}/api/states/light.test`,
|
||||
expect.objectContaining({
|
||||
headers: expect.objectContaining({
|
||||
Authorization: `Bearer ${mockToken}`
|
||||
})
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
test('should call service', async () => {
|
||||
await instance.callService('light', 'turn_on', { entity_id: 'light.test' });
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
@@ -246,88 +193,10 @@ describe('Home Assistant Integration', () => {
|
||||
});
|
||||
|
||||
test('should handle fetch errors', async () => {
|
||||
mockFetch.mockRejectedValueOnce(new Error('Network error'));
|
||||
await expect(instance.fetchStates()).rejects.toThrow('Network error');
|
||||
mockFetch.mockImplementation(() => {
|
||||
throw new Error('Network error');
|
||||
});
|
||||
|
||||
test('should handle invalid JSON responses', async () => {
|
||||
mockFetch.mockResolvedValueOnce(new Response('invalid json'));
|
||||
await expect(instance.fetchStates()).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('should handle non-200 responses', async () => {
|
||||
mockFetch.mockResolvedValueOnce(new Response('Error', { status: 500 }));
|
||||
await expect(instance.fetchStates()).rejects.toThrow();
|
||||
});
|
||||
|
||||
describe('Event Subscription', () => {
|
||||
let eventCallback: (event: HassEvent) => void;
|
||||
|
||||
beforeEach(() => {
|
||||
eventCallback = mock();
|
||||
});
|
||||
|
||||
test('should subscribe to events', async () => {
|
||||
const subscriptionId = await instance.subscribeEvents(eventCallback);
|
||||
expect(typeof subscriptionId).toBe('number');
|
||||
});
|
||||
|
||||
test('should unsubscribe from events', async () => {
|
||||
const subscriptionId = await instance.subscribeEvents(eventCallback);
|
||||
await instance.unsubscribeEvents(subscriptionId);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('get_hass', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
const createMockServices = (): MockHassServices => ({
|
||||
light: {},
|
||||
climate: {},
|
||||
switch: {},
|
||||
media_player: {}
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
process.env = { ...originalEnv };
|
||||
process.env.HASS_HOST = 'http://localhost:8123';
|
||||
process.env.HASS_TOKEN = 'test_token';
|
||||
|
||||
// Reset the mock implementation
|
||||
(get_hass as jest.MockedFunction<typeof get_hass>).mockImplementation(async () => {
|
||||
const actual = jest.requireActual<typeof import('../../src/hass/index.js')>('../../src/hass/index.js');
|
||||
const baseUrl = process.env.HASS_HOST || 'http://localhost:8123';
|
||||
const token = process.env.HASS_TOKEN || 'test_token';
|
||||
const instance = new actual.HassInstanceImpl(baseUrl, token) as TestHassInstance;
|
||||
instance._baseUrl = baseUrl;
|
||||
instance._token = token;
|
||||
return instance;
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
test('should create instance with default configuration', async () => {
|
||||
const instance = await get_hass() as TestHassInstance;
|
||||
expect(instance._baseUrl).toBe('http://localhost:8123');
|
||||
expect(instance._token).toBe('test_token');
|
||||
});
|
||||
|
||||
test('should reuse existing instance', async () => {
|
||||
const instance1 = await get_hass();
|
||||
const instance2 = await get_hass();
|
||||
expect(instance1).toBe(instance2);
|
||||
});
|
||||
|
||||
test('should use custom configuration', async () => {
|
||||
process.env.HASS_HOST = 'https://hass.example.com';
|
||||
process.env.HASS_TOKEN = 'prod_token';
|
||||
const instance = await get_hass() as TestHassInstance;
|
||||
expect(instance._baseUrl).toBe('https://hass.example.com');
|
||||
expect(instance._token).toBe('prod_token');
|
||||
await expect(instance.getStates()).rejects.toThrow('Network error');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,13 +1,12 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { entitySchema, serviceSchema, stateChangedEventSchema, configSchema, automationSchema, deviceControlSchema } from '../../src/schemas/hass.js';
|
||||
import Ajv from 'ajv';
|
||||
import { describe, expect, test } from "bun:test";
|
||||
|
||||
const ajv = new Ajv();
|
||||
|
||||
// Create validation functions for each schema
|
||||
const validateEntity = ajv.compile(entitySchema);
|
||||
const validateService = ajv.compile(serviceSchema);
|
||||
import {
|
||||
validateEntity,
|
||||
validateService,
|
||||
validateStateChangedEvent,
|
||||
validateConfig,
|
||||
validateAutomation,
|
||||
validateDeviceControl
|
||||
} from '../../src/schemas/hass.js';
|
||||
|
||||
describe('Home Assistant Schemas', () => {
|
||||
describe('Entity Schema', () => {
|
||||
@@ -17,7 +16,7 @@ describe('Home Assistant Schemas', () => {
|
||||
state: 'on',
|
||||
attributes: {
|
||||
brightness: 255,
|
||||
friendly_name: 'Living Room Light'
|
||||
color_temp: 300
|
||||
},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
@@ -27,17 +26,17 @@ describe('Home Assistant Schemas', () => {
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
expect(validateEntity(validEntity)).toBe(true);
|
||||
const result = validateEntity(validEntity);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject entity with missing required fields', () => {
|
||||
const invalidEntity = {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on'
|
||||
// missing attributes, last_changed, last_updated, context
|
||||
state: 'on',
|
||||
attributes: {}
|
||||
};
|
||||
expect(validateEntity(invalidEntity)).toBe(false);
|
||||
expect(validateEntity.errors).toBeDefined();
|
||||
const result = validateEntity(invalidEntity);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
|
||||
test('should validate entity with additional attributes', () => {
|
||||
@@ -45,8 +44,9 @@ describe('Home Assistant Schemas', () => {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on',
|
||||
attributes: {
|
||||
brightness: 100,
|
||||
color_mode: 'brightness'
|
||||
brightness: 255,
|
||||
color_temp: 300,
|
||||
custom_attr: 'value'
|
||||
},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
@@ -56,12 +56,13 @@ describe('Home Assistant Schemas', () => {
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
expect(validateEntity(validEntity)).toBe(true);
|
||||
const result = validateEntity(validEntity);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject invalid entity_id format', () => {
|
||||
const invalidEntity = {
|
||||
entity_id: 'invalid_entity',
|
||||
entity_id: 'invalid_format',
|
||||
state: 'on',
|
||||
attributes: {},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
@@ -72,7 +73,8 @@ describe('Home Assistant Schemas', () => {
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
expect(validateEntity(invalidEntity)).toBe(false);
|
||||
const result = validateEntity(invalidEntity);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -82,13 +84,14 @@ describe('Home Assistant Schemas', () => {
|
||||
domain: 'light',
|
||||
service: 'turn_on',
|
||||
target: {
|
||||
entity_id: ['light.living_room']
|
||||
entity_id: 'light.living_room'
|
||||
},
|
||||
service_data: {
|
||||
brightness_pct: 100
|
||||
}
|
||||
};
|
||||
expect(validateService(basicService)).toBe(true);
|
||||
const result = validateService(basicService);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate service call with multiple targets', () => {
|
||||
@@ -96,15 +99,14 @@ describe('Home Assistant Schemas', () => {
|
||||
domain: 'light',
|
||||
service: 'turn_on',
|
||||
target: {
|
||||
entity_id: ['light.living_room', 'light.kitchen'],
|
||||
device_id: ['device123', 'device456'],
|
||||
area_id: ['living_room', 'kitchen']
|
||||
entity_id: ['light.living_room', 'light.kitchen']
|
||||
},
|
||||
service_data: {
|
||||
brightness_pct: 100
|
||||
}
|
||||
};
|
||||
expect(validateService(multiTargetService)).toBe(true);
|
||||
const result = validateService(multiTargetService);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate service call without targets', () => {
|
||||
@@ -112,7 +114,8 @@ describe('Home Assistant Schemas', () => {
|
||||
domain: 'homeassistant',
|
||||
service: 'restart'
|
||||
};
|
||||
expect(validateService(noTargetService)).toBe(true);
|
||||
const result = validateService(noTargetService);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject service call with invalid target type', () => {
|
||||
@@ -120,57 +123,37 @@ describe('Home Assistant Schemas', () => {
|
||||
domain: 'light',
|
||||
service: 'turn_on',
|
||||
target: {
|
||||
entity_id: 'not_an_array' // should be an array
|
||||
entity_id: 123 // Invalid type
|
||||
}
|
||||
};
|
||||
expect(validateService(invalidService)).toBe(false);
|
||||
expect(validateService.errors).toBeDefined();
|
||||
const result = validateService(invalidService);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
|
||||
test('should reject service call with invalid domain', () => {
|
||||
const invalidService = {
|
||||
domain: 'invalid_domain',
|
||||
service: 'turn_on',
|
||||
target: {
|
||||
entity_id: ['light.living_room']
|
||||
}
|
||||
domain: '',
|
||||
service: 'turn_on'
|
||||
};
|
||||
expect(validateService(invalidService)).toBe(false);
|
||||
const result = validateService(invalidService);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('State Changed Event Schema', () => {
|
||||
const validate = ajv.compile(stateChangedEventSchema);
|
||||
|
||||
test('should validate a valid state changed event', () => {
|
||||
const validEvent = {
|
||||
event_type: 'state_changed',
|
||||
data: {
|
||||
entity_id: 'light.living_room',
|
||||
old_state: {
|
||||
state: 'off',
|
||||
attributes: {}
|
||||
},
|
||||
new_state: {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on',
|
||||
attributes: {
|
||||
brightness: 255
|
||||
},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123456',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
}
|
||||
},
|
||||
old_state: {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'off',
|
||||
attributes: {},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123456',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -182,7 +165,8 @@ describe('Home Assistant Schemas', () => {
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
expect(validate(validEvent)).toBe(true);
|
||||
const result = validateStateChangedEvent(validEvent);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate event with null old_state', () => {
|
||||
@@ -190,20 +174,12 @@ describe('Home Assistant Schemas', () => {
|
||||
event_type: 'state_changed',
|
||||
data: {
|
||||
entity_id: 'light.living_room',
|
||||
old_state: null,
|
||||
new_state: {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on',
|
||||
attributes: {},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123456',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
attributes: {}
|
||||
}
|
||||
},
|
||||
old_state: null
|
||||
},
|
||||
origin: 'LOCAL',
|
||||
time_fired: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
@@ -212,7 +188,8 @@ describe('Home Assistant Schemas', () => {
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
expect(validate(newEntityEvent)).toBe(true);
|
||||
const result = validateStateChangedEvent(newEntityEvent);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject event with invalid event_type', () => {
|
||||
@@ -220,278 +197,62 @@ describe('Home Assistant Schemas', () => {
|
||||
event_type: 'wrong_type',
|
||||
data: {
|
||||
entity_id: 'light.living_room',
|
||||
new_state: null,
|
||||
old_state: null
|
||||
},
|
||||
origin: 'LOCAL',
|
||||
time_fired: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123456',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
old_state: null,
|
||||
new_state: {
|
||||
state: 'on',
|
||||
attributes: {}
|
||||
}
|
||||
}
|
||||
};
|
||||
expect(validate(invalidEvent)).toBe(false);
|
||||
expect(validate.errors).toBeDefined();
|
||||
const result = validateStateChangedEvent(invalidEvent);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Config Schema', () => {
|
||||
const validate = ajv.compile(configSchema);
|
||||
|
||||
test('should validate a minimal config', () => {
|
||||
const minimalConfig = {
|
||||
latitude: 52.3731,
|
||||
longitude: 4.8922,
|
||||
elevation: 0,
|
||||
unit_system: {
|
||||
length: 'km',
|
||||
mass: 'kg',
|
||||
temperature: '°C',
|
||||
volume: 'L'
|
||||
},
|
||||
location_name: 'Home',
|
||||
time_zone: 'Europe/Amsterdam',
|
||||
components: ['homeassistant'],
|
||||
version: '2024.1.0'
|
||||
};
|
||||
expect(validate(minimalConfig)).toBe(true);
|
||||
const result = validateConfig(minimalConfig);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject config with missing required fields', () => {
|
||||
const invalidConfig = {
|
||||
latitude: 52.3731,
|
||||
longitude: 4.8922
|
||||
// missing other required fields
|
||||
location_name: 'Home'
|
||||
};
|
||||
expect(validate(invalidConfig)).toBe(false);
|
||||
expect(validate.errors).toBeDefined();
|
||||
const result = validateConfig(invalidConfig);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
|
||||
test('should reject config with invalid types', () => {
|
||||
const invalidConfig = {
|
||||
latitude: '52.3731', // should be number
|
||||
longitude: 4.8922,
|
||||
elevation: 0,
|
||||
unit_system: {
|
||||
length: 'km',
|
||||
mass: 'kg',
|
||||
temperature: '°C',
|
||||
volume: 'L'
|
||||
},
|
||||
location_name: 'Home',
|
||||
location_name: 123,
|
||||
time_zone: 'Europe/Amsterdam',
|
||||
components: ['homeassistant'],
|
||||
components: 'not_an_array',
|
||||
version: '2024.1.0'
|
||||
};
|
||||
expect(validate(invalidConfig)).toBe(false);
|
||||
expect(validate.errors).toBeDefined();
|
||||
});
|
||||
});
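The config hunks also drop the ajv `validate.errors` assertions. With safeParse the equivalent detail lives on `result.error.issues`; a short sketch, with the schema abridged to the required fields exercised above:

// Sketch: reading failure details from a safeParse result.
import { z } from "zod";

const configSchema = z.object({
    latitude: z.number(),
    longitude: z.number(),
    elevation: z.number(),
    location_name: z.string(),
    time_zone: z.string(),
    components: z.array(z.string()),
    version: z.string()
});

const result = configSchema.safeParse({ latitude: 52.3731, longitude: 4.8922, location_name: 'Home' });
if (!result.success) {
    // One issue per missing or mistyped field, comparable to the old validate.errors array.
    for (const issue of result.error.issues) {
        console.log(issue.path.join('.'), issue.code, issue.message);
    }
}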
|
||||
|
||||
describe('Automation Schema', () => {
|
||||
const validate = ajv.compile(automationSchema);
|
||||
|
||||
test('should validate a basic automation', () => {
|
||||
const basicAutomation = {
|
||||
alias: 'Turn on lights at sunset',
|
||||
description: 'Automatically turn on lights when the sun sets',
|
||||
trigger: [{
|
||||
platform: 'sun',
|
||||
event: 'sunset',
|
||||
offset: '+00:30:00'
|
||||
}],
|
||||
action: [{
|
||||
service: 'light.turn_on',
|
||||
target: {
|
||||
entity_id: ['light.living_room', 'light.kitchen']
|
||||
},
|
||||
data: {
|
||||
brightness_pct: 70
|
||||
}
|
||||
}]
|
||||
};
|
||||
expect(validate(basicAutomation)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate automation with conditions', () => {
|
||||
const automationWithConditions = {
|
||||
alias: 'Conditional Light Control',
|
||||
mode: 'single',
|
||||
trigger: [{
|
||||
platform: 'state',
|
||||
entity_id: 'binary_sensor.motion',
|
||||
to: 'on'
|
||||
}],
|
||||
condition: [{
|
||||
condition: 'and',
|
||||
conditions: [
|
||||
{
|
||||
condition: 'time',
|
||||
after: '22:00:00',
|
||||
before: '06:00:00'
|
||||
},
|
||||
{
|
||||
condition: 'state',
|
||||
entity_id: 'input_boolean.guest_mode',
|
||||
state: 'off'
|
||||
}
|
||||
]
|
||||
}],
|
||||
action: [{
|
||||
service: 'light.turn_on',
|
||||
target: {
|
||||
entity_id: 'light.hallway'
|
||||
}
|
||||
}]
|
||||
};
|
||||
expect(validate(automationWithConditions)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate automation with multiple triggers and actions', () => {
|
||||
const complexAutomation = {
|
||||
alias: 'Complex Automation',
|
||||
mode: 'parallel',
|
||||
trigger: [
|
||||
{
|
||||
platform: 'state',
|
||||
entity_id: 'binary_sensor.door',
|
||||
to: 'on'
|
||||
},
|
||||
{
|
||||
platform: 'state',
|
||||
entity_id: 'binary_sensor.window',
|
||||
to: 'on'
|
||||
}
|
||||
],
|
||||
condition: [{
|
||||
condition: 'state',
|
||||
entity_id: 'alarm_control_panel.home',
|
||||
state: 'armed_away'
|
||||
}],
|
||||
action: [
|
||||
{
|
||||
service: 'notify.mobile_app',
|
||||
data: {
|
||||
message: 'Security alert: Movement detected!'
|
||||
}
|
||||
},
|
||||
{
|
||||
service: 'light.turn_on',
|
||||
target: {
|
||||
entity_id: 'light.all_lights'
|
||||
}
|
||||
},
|
||||
{
|
||||
service: 'camera.snapshot',
|
||||
target: {
|
||||
entity_id: 'camera.front_door'
|
||||
}
|
||||
}
|
||||
]
|
||||
};
|
||||
expect(validate(complexAutomation)).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject automation without required fields', () => {
|
||||
const invalidAutomation = {
|
||||
description: 'Missing required fields'
|
||||
// missing alias, trigger, and action
|
||||
};
|
||||
expect(validate(invalidAutomation)).toBe(false);
|
||||
expect(validate.errors).toBeDefined();
|
||||
});
|
||||
|
||||
test('should validate all automation modes', () => {
|
||||
const modes = ['single', 'parallel', 'queued', 'restart'];
|
||||
modes.forEach(mode => {
|
||||
const automation = {
|
||||
alias: `Test ${mode} mode`,
|
||||
mode,
|
||||
trigger: [{
|
||||
platform: 'state',
|
||||
entity_id: 'input_boolean.test',
|
||||
to: 'on'
|
||||
}],
|
||||
action: [{
|
||||
service: 'light.turn_on',
|
||||
target: {
|
||||
entity_id: 'light.test'
|
||||
}
|
||||
}]
|
||||
};
|
||||
expect(validate(automation)).toBe(true);
|
||||
});
|
||||
const result = validateConfig(invalidConfig);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
});
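The automation cases above (compiled with ajv in the old file) exercise objects with triggers, optional conditions, service actions, and one of four run modes. As a reference for that shape, a sketch rather than the project's canonical types:

// Sketch of the automation shape used by the fixtures above.
type AutomationMode = 'single' | 'parallel' | 'queued' | 'restart';

interface ServiceAction {
    service: string;                              // e.g. 'light.turn_on'
    target?: { entity_id: string | string[] };
    data?: Record<string, unknown>;               // e.g. { brightness_pct: 70 }
}

interface Automation {
    alias: string;
    description?: string;
    mode?: AutomationMode;
    trigger: Array<Record<string, unknown>>;      // sun, state, time triggers, ...
    condition?: Array<Record<string, unknown>>;   // and/or, time, state conditions, ...
    action: ServiceAction[];
}

// Matches the 'Turn on lights at sunset' fixture:
const sunsetAutomation: Automation = {
    alias: 'Turn on lights at sunset',
    trigger: [{ platform: 'sun', event: 'sunset', offset: '+00:30:00' }],
    action: [{
        service: 'light.turn_on',
        target: { entity_id: ['light.living_room', 'light.kitchen'] },
        data: { brightness_pct: 70 }
    }]
};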
|
||||
|
||||
describe('Device Control Schema', () => {
|
||||
const validate = ajv.compile(deviceControlSchema);
|
||||
|
||||
test('should validate light control command', () => {
|
||||
const lightCommand = {
|
||||
const command = {
|
||||
domain: 'light',
|
||||
command: 'turn_on',
|
||||
entity_id: 'light.living_room',
|
||||
parameters: {
|
||||
brightness: 255,
|
||||
color_temp: 400,
|
||||
transition: 2
|
||||
brightness_pct: 100
|
||||
}
|
||||
};
|
||||
expect(validate(lightCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate climate control command', () => {
|
||||
const climateCommand = {
|
||||
domain: 'climate',
|
||||
command: 'set_temperature',
|
||||
entity_id: 'climate.living_room',
|
||||
parameters: {
|
||||
temperature: 22.5,
|
||||
hvac_mode: 'heat',
|
||||
target_temp_high: 24,
|
||||
target_temp_low: 20
|
||||
}
|
||||
};
|
||||
expect(validate(climateCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate cover control command', () => {
|
||||
const coverCommand = {
|
||||
domain: 'cover',
|
||||
command: 'set_position',
|
||||
entity_id: 'cover.garage_door',
|
||||
parameters: {
|
||||
position: 50,
|
||||
tilt_position: 45
|
||||
}
|
||||
};
|
||||
expect(validate(coverCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate fan control command', () => {
|
||||
const fanCommand = {
|
||||
domain: 'fan',
|
||||
command: 'set_speed',
|
||||
entity_id: 'fan.bedroom',
|
||||
parameters: {
|
||||
speed: 'medium',
|
||||
oscillating: true,
|
||||
direction: 'forward'
|
||||
}
|
||||
};
|
||||
expect(validate(fanCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject command with invalid domain', () => {
|
||||
const invalidCommand = {
|
||||
domain: 'invalid_domain',
|
||||
command: 'turn_on',
|
||||
entity_id: 'light.living_room'
|
||||
};
|
||||
expect(validate(invalidCommand)).toBe(false);
|
||||
expect(validate.errors).toBeDefined();
|
||||
const result = validateDeviceControl(command);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
test('should reject command with mismatched domain and entity_id', () => {
|
||||
@@ -500,46 +261,18 @@ describe('Home Assistant Schemas', () => {
|
||||
command: 'turn_on',
|
||||
entity_id: 'switch.living_room' // mismatched domain
|
||||
};
|
||||
expect(validate(mismatchedCommand)).toBe(false);
|
||||
const result = validateDeviceControl(mismatchedCommand);
|
||||
expect(result.success).toBe(false);
|
||||
});
|
||||
|
||||
test('should validate command with array of entity_ids', () => {
|
||||
const multiEntityCommand = {
|
||||
const command = {
|
||||
domain: 'light',
|
||||
command: 'turn_on',
|
||||
entity_id: ['light.living_room', 'light.kitchen'],
|
||||
parameters: {
|
||||
brightness: 255
|
||||
}
|
||||
entity_id: ['light.living_room', 'light.kitchen']
|
||||
};
|
||||
expect(validate(multiEntityCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate scene activation command', () => {
|
||||
const sceneCommand = {
|
||||
domain: 'scene',
|
||||
command: 'turn_on',
|
||||
entity_id: 'scene.movie_night',
|
||||
parameters: {
|
||||
transition: 2
|
||||
}
|
||||
};
|
||||
expect(validate(sceneCommand)).toBe(true);
|
||||
});
|
||||
|
||||
test('should validate script execution command', () => {
|
||||
const scriptCommand = {
|
||||
domain: 'script',
|
||||
command: 'turn_on',
|
||||
entity_id: 'script.welcome_home',
|
||||
parameters: {
|
||||
variables: {
|
||||
user: 'John',
|
||||
delay: 5
|
||||
}
|
||||
}
|
||||
};
|
||||
expect(validate(scriptCommand)).toBe(true);
|
||||
const result = validateDeviceControl(command);
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
});
|
||||
});
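One device-control case above rejects a command whose entity_id prefix ('switch.') disagrees with its domain ('light'). Cross-field rules like that can be expressed with a zod refine; a sketch, assuming the real schema enforces something comparable:

// Sketch: rejecting commands whose entity_id prefix does not match `domain`.
import { z } from "zod";

const deviceControlSchema = z
    .object({
        domain: z.enum(['light', 'climate', 'cover', 'fan', 'scene', 'script']),
        command: z.string().min(1),
        entity_id: z.union([z.string(), z.array(z.string())]),
        parameters: z.record(z.unknown()).optional()
    })
    .refine(
        (cmd) => {
            const ids = Array.isArray(cmd.entity_id) ? cmd.entity_id : [cmd.entity_id];
            return ids.every((id) => id.startsWith(`${cmd.domain}.`));
        },
        { message: 'entity_id domain prefix must match the command domain' }
    );

export const validateDeviceControl = (input: unknown) => deviceControlSchema.safeParse(input);
// validateDeviceControl({ domain: 'light', command: 'turn_on', entity_id: 'switch.living_room' }).success === false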
|
||||
@@ -1,149 +1,149 @@
import { describe, expect, test } from "bun:test";
import { describe, expect, test, beforeEach, afterEach, mock } from "bun:test";
import { describe, expect, test, beforeEach, afterEach, mock, spyOn } from "bun:test";
import type { Mock } from "bun:test";
import type { Express, Application } from 'express';
import type { Logger } from 'winston';
import type { Elysia } from "elysia";
|
||||
|
||||
// Types for our mocks
|
||||
interface MockApp {
|
||||
use: Mock<() => void>;
|
||||
listen: Mock<(port: number, callback: () => void) => { close: Mock<() => void> }>;
|
||||
}
|
||||
|
||||
interface MockLiteMCPInstance {
|
||||
addTool: Mock<() => void>;
|
||||
start: Mock<() => Promise<void>>;
|
||||
}
|
||||
|
||||
type MockLogger = {
|
||||
info: Mock<(message: string) => void>;
|
||||
error: Mock<(message: string) => void>;
|
||||
debug: Mock<(message: string) => void>;
|
||||
};
|
||||
|
||||
// Mock express
|
||||
const mockApp: MockApp = {
|
||||
use: mock(() => undefined),
|
||||
listen: mock((port: number, callback: () => void) => {
|
||||
callback();
|
||||
return { close: mock(() => undefined) };
|
||||
// Create mock instances
|
||||
const mockApp = {
|
||||
use: mock(() => mockApp),
|
||||
get: mock(() => mockApp),
|
||||
post: mock(() => mockApp),
|
||||
listen: mock((port: number, callback?: () => void) => {
|
||||
callback?.();
|
||||
return mockApp;
|
||||
})
|
||||
};
|
||||
const mockExpress = mock(() => mockApp);
|
||||
|
||||
// Mock LiteMCP instance
|
||||
const mockLiteMCPInstance: MockLiteMCPInstance = {
|
||||
addTool: mock(() => undefined),
|
||||
start: mock(() => Promise.resolve())
|
||||
// Create mock constructors
|
||||
const MockElysia = mock(() => mockApp);
|
||||
const mockCors = mock(() => (app: any) => app);
|
||||
const mockSwagger = mock(() => (app: any) => app);
|
||||
const mockSpeechService = {
|
||||
initialize: mock(() => Promise.resolve()),
|
||||
shutdown: mock(() => Promise.resolve())
|
||||
};
|
||||
const mockLiteMCP = mock((name: string, version: string) => mockLiteMCPInstance);
|
||||
|
||||
// Mock logger
|
||||
const mockLogger: MockLogger = {
|
||||
info: mock((message: string) => undefined),
|
||||
error: mock((message: string) => undefined),
|
||||
debug: mock((message: string) => undefined)
|
||||
// Mock the modules
|
||||
const mockModules = {
|
||||
Elysia: MockElysia,
|
||||
cors: mockCors,
|
||||
swagger: mockSwagger,
|
||||
speechService: mockSpeechService,
|
||||
config: mock(() => ({})),
|
||||
resolve: mock((...args: string[]) => args.join('/')),
|
||||
z: { object: mock(() => ({})), enum: mock(() => ({})) }
|
||||
};
|
||||
|
||||
// Mock module resolution
|
||||
const mockResolver = {
|
||||
resolve(specifier: string) {
|
||||
const mocks: Record<string, any> = {
|
||||
'elysia': { Elysia: mockModules.Elysia },
|
||||
'@elysiajs/cors': { cors: mockModules.cors },
|
||||
'@elysiajs/swagger': { swagger: mockModules.swagger },
|
||||
'../speech/index.js': { speechService: mockModules.speechService },
|
||||
'dotenv': { config: mockModules.config },
|
||||
'path': { resolve: mockModules.resolve },
|
||||
'zod': { z: mockModules.z }
|
||||
};
|
||||
return mocks[specifier] || {};
|
||||
}
|
||||
};
|
||||
|
||||
describe('Server Initialization', () => {
|
||||
let originalEnv: NodeJS.ProcessEnv;
|
||||
let consoleLog: Mock<typeof console.log>;
|
||||
let consoleError: Mock<typeof console.error>;
|
||||
let originalResolve: any;
|
||||
|
||||
beforeEach(() => {
|
||||
// Store original environment
|
||||
originalEnv = { ...process.env };
|
||||
|
||||
// Setup mocks
|
||||
(globalThis as any).express = mockExpress;
|
||||
(globalThis as any).LiteMCP = mockLiteMCP;
|
||||
(globalThis as any).logger = mockLogger;
|
||||
// Mock console methods
|
||||
consoleLog = mock(() => { });
|
||||
consoleError = mock(() => { });
|
||||
console.log = consoleLog;
|
||||
console.error = consoleError;
|
||||
|
||||
// Reset all mocks
|
||||
mockApp.use.mockReset();
|
||||
mockApp.listen.mockReset();
|
||||
mockLogger.info.mockReset();
|
||||
mockLogger.error.mockReset();
|
||||
mockLogger.debug.mockReset();
|
||||
mockLiteMCP.mockReset();
|
||||
for (const key in mockModules) {
|
||||
const module = mockModules[key as keyof typeof mockModules];
|
||||
if (typeof module === 'object' && module !== null) {
|
||||
Object.values(module).forEach(value => {
|
||||
if (typeof value === 'function' && 'mock' in value) {
|
||||
(value as Mock<any>).mockReset();
|
||||
}
|
||||
});
|
||||
} else if (typeof module === 'function' && 'mock' in module) {
|
||||
(module as Mock<any>).mockReset();
|
||||
}
|
||||
}
|
||||
|
||||
// Set default environment variables
|
||||
process.env.NODE_ENV = 'test';
|
||||
process.env.PORT = '4000';
|
||||
|
||||
// Setup module resolution mock
|
||||
originalResolve = (globalThis as any).Bun?.resolveSync;
|
||||
(globalThis as any).Bun = {
|
||||
...(globalThis as any).Bun,
|
||||
resolveSync: (specifier: string) => mockResolver.resolve(specifier)
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment
|
||||
process.env = originalEnv;
|
||||
|
||||
// Clean up mocks
|
||||
delete (globalThis as any).express;
|
||||
delete (globalThis as any).LiteMCP;
|
||||
delete (globalThis as any).logger;
|
||||
// Restore module resolution
|
||||
if (originalResolve) {
|
||||
(globalThis as any).Bun.resolveSync = originalResolve;
|
||||
}
|
||||
});
|
||||
|
||||
test('should start Express server when not in Claude mode', async () => {
|
||||
// Set OpenAI mode
|
||||
process.env.PROCESSOR_TYPE = 'openai';
|
||||
test('should initialize server with middleware', async () => {
|
||||
// Import and initialize server
|
||||
const mod = await import('../src/index');
|
||||
|
||||
// Import the main module
|
||||
await import('../src/index.js');
|
||||
// Verify server initialization
|
||||
expect(MockElysia.mock.calls.length).toBe(1);
|
||||
expect(mockCors.mock.calls.length).toBe(1);
|
||||
expect(mockSwagger.mock.calls.length).toBe(1);
|
||||
|
||||
// Verify Express server was initialized
|
||||
expect(mockExpress.mock.calls.length).toBeGreaterThan(0);
|
||||
expect(mockApp.use.mock.calls.length).toBeGreaterThan(0);
|
||||
expect(mockApp.listen.mock.calls.length).toBeGreaterThan(0);
|
||||
|
||||
const infoMessages = mockLogger.info.mock.calls.map(([msg]) => msg);
|
||||
expect(infoMessages.some(msg => msg.includes('Server is running on port'))).toBe(true);
|
||||
// Verify console output
|
||||
const logCalls = consoleLog.mock.calls;
|
||||
expect(logCalls.some(call =>
|
||||
typeof call.args[0] === 'string' &&
|
||||
call.args[0].includes('Server is running on port')
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
test('should not start Express server in Claude mode', async () => {
|
||||
// Set Claude mode
|
||||
process.env.PROCESSOR_TYPE = 'claude';
|
||||
test('should initialize speech service when enabled', async () => {
|
||||
// Enable speech service
|
||||
process.env.SPEECH_ENABLED = 'true';
|
||||
|
||||
// Import the main module
|
||||
await import('../src/index.js');
|
||||
// Import and initialize server
|
||||
const mod = await import('../src/index');
|
||||
|
||||
// Verify Express server was not initialized
|
||||
expect(mockExpress.mock.calls.length).toBe(0);
|
||||
expect(mockApp.use.mock.calls.length).toBe(0);
|
||||
expect(mockApp.listen.mock.calls.length).toBe(0);
|
||||
|
||||
const infoMessages = mockLogger.info.mock.calls.map(([msg]) => msg);
|
||||
expect(infoMessages).toContain('Running in Claude mode - Express server disabled');
|
||||
// Verify speech service initialization
|
||||
expect(mockSpeechService.initialize.mock.calls.length).toBe(1);
|
||||
});
|
||||
|
||||
test('should initialize LiteMCP in both modes', async () => {
|
||||
// Test OpenAI mode
|
||||
process.env.PROCESSOR_TYPE = 'openai';
|
||||
await import('../src/index.js');
|
||||
test('should handle server shutdown gracefully', async () => {
|
||||
// Enable speech service for shutdown test
|
||||
process.env.SPEECH_ENABLED = 'true';
|
||||
|
||||
expect(mockLiteMCP.mock.calls.length).toBeGreaterThan(0);
|
||||
const [name, version] = mockLiteMCP.mock.calls[0] ?? [];
|
||||
expect(name).toBe('home-assistant');
|
||||
expect(typeof version).toBe('string');
|
||||
// Import and initialize server
|
||||
const mod = await import('../src/index');
|
||||
|
||||
// Reset for next test
|
||||
mockLiteMCP.mockReset();
|
||||
// Simulate SIGTERM
|
||||
process.emit('SIGTERM');
|
||||
|
||||
// Test Claude mode
|
||||
process.env.PROCESSOR_TYPE = 'claude';
|
||||
await import('../src/index.js');
|
||||
|
||||
expect(mockLiteMCP.mock.calls.length).toBeGreaterThan(0);
|
||||
const [name2, version2] = mockLiteMCP.mock.calls[0] ?? [];
|
||||
expect(name2).toBe('home-assistant');
|
||||
expect(typeof version2).toBe('string');
|
||||
});
|
||||
|
||||
test('should handle missing PROCESSOR_TYPE (default to Express server)', async () => {
|
||||
// Remove PROCESSOR_TYPE
|
||||
delete process.env.PROCESSOR_TYPE;
|
||||
|
||||
// Import the main module
|
||||
await import('../src/index.js');
|
||||
|
||||
// Verify Express server was initialized (default behavior)
|
||||
expect(mockExpress.mock.calls.length).toBeGreaterThan(0);
|
||||
expect(mockApp.use.mock.calls.length).toBeGreaterThan(0);
|
||||
expect(mockApp.listen.mock.calls.length).toBeGreaterThan(0);
|
||||
|
||||
const infoMessages = mockLogger.info.mock.calls.map(([msg]) => msg);
|
||||
expect(infoMessages.some(msg => msg.includes('Server is running on port'))).toBe(true);
|
||||
// Verify shutdown behavior
|
||||
expect(mockSpeechService.shutdown.mock.calls.length).toBe(1);
|
||||
expect(consoleLog.mock.calls.some(call =>
|
||||
typeof call.args[0] === 'string' &&
|
||||
call.args[0].includes('Shutting down gracefully')
|
||||
)).toBe(true);
|
||||
});
|
||||
});
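The rewritten server tests assert an Elysia app wired with cors and swagger, a speech service that starts only when SPEECH_ENABLED=true, and a graceful SIGTERM shutdown. A minimal bootstrap with that shape could look as follows; the speech module path and API are taken from the mock table above, not verified against src/index:

// Sketch of the bootstrap these tests exercise.
import { Elysia } from "elysia";
import { cors } from "@elysiajs/cors";
import { swagger } from "@elysiajs/swagger";
import { speechService } from "./speech/index.js"; // path assumed from the mocks above

const port = Number(process.env.PORT ?? 3000);

const app = new Elysia()
    .use(cors())
    .use(swagger())
    .listen(port, () => {
        console.log(`Server is running on port ${port}`);
    });

if (process.env.SPEECH_ENABLED === 'true') {
    await speechService.initialize();
}

process.on('SIGTERM', async () => {
    console.log('Shutting down gracefully');
    await speechService.shutdown();
    app.stop();
});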
|
||||
@@ -1,81 +1,79 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { SpeechToText, TranscriptionResult, WakeWordEvent, TranscriptionError, TranscriptionOptions } from '../../src/speech/speechToText';
|
||||
import { EventEmitter } from 'events';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { spawn } from 'child_process';
|
||||
import { describe, expect, beforeEach, afterEach, it, mock, spyOn } from 'bun:test';
|
||||
import { describe, expect, test, beforeEach, afterEach, mock, spyOn } from "bun:test";
|
||||
import type { Mock } from "bun:test";
|
||||
import { EventEmitter } from "events";
|
||||
import { SpeechToText, TranscriptionError, type TranscriptionOptions } from "../../src/speech/speechToText";
|
||||
import type { SpeechToTextConfig } from "../../src/speech/types";
|
||||
import type { ChildProcess } from "child_process";
|
||||
|
||||
// Mock child_process spawn
|
||||
const spawnMock = mock((cmd: string, args: string[]) => ({
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
}));
|
||||
interface MockProcess extends EventEmitter {
|
||||
stdout: EventEmitter;
|
||||
stderr: EventEmitter;
|
||||
kill: Mock<() => void>;
|
||||
}
|
||||
|
||||
type SpawnFn = {
|
||||
(cmds: string[], options?: Record<string, unknown>): ChildProcess;
|
||||
};
|
||||
|
||||
describe('SpeechToText', () => {
|
||||
let spawnMock: Mock<SpawnFn>;
|
||||
let mockProcess: MockProcess;
|
||||
let speechToText: SpeechToText;
|
||||
const testAudioDir = path.join(import.meta.dir, 'test_audio');
|
||||
const mockConfig = {
|
||||
containerName: 'test-whisper',
|
||||
modelPath: '/models/whisper',
|
||||
modelType: 'base.en'
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
speechToText = new SpeechToText(mockConfig);
|
||||
// Create test audio directory if it doesn't exist
|
||||
if (!fs.existsSync(testAudioDir)) {
|
||||
fs.mkdirSync(testAudioDir, { recursive: true });
|
||||
}
|
||||
// Reset spawn mock
|
||||
spawnMock.mockReset();
|
||||
// Create mock process
|
||||
mockProcess = new EventEmitter() as MockProcess;
|
||||
mockProcess.stdout = new EventEmitter();
|
||||
mockProcess.stderr = new EventEmitter();
|
||||
mockProcess.kill = mock(() => { });
|
||||
|
||||
// Create spawn mock
|
||||
spawnMock = mock((cmds: string[], options?: Record<string, unknown>) => mockProcess as unknown as ChildProcess);
|
||||
(globalThis as any).Bun = { spawn: spawnMock };
|
||||
|
||||
// Initialize SpeechToText
|
||||
const config: SpeechToTextConfig = {
|
||||
modelPath: '/test/model',
|
||||
modelType: 'base.en',
|
||||
containerName: 'test-container'
|
||||
};
|
||||
speechToText = new SpeechToText(config);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
speechToText.stopWakeWordDetection();
|
||||
// Clean up test files
|
||||
if (fs.existsSync(testAudioDir)) {
|
||||
fs.rmSync(testAudioDir, { recursive: true, force: true });
|
||||
}
|
||||
// Cleanup
|
||||
mockProcess.removeAllListeners();
|
||||
mockProcess.stdout.removeAllListeners();
|
||||
mockProcess.stderr.removeAllListeners();
|
||||
});
|
||||
|
||||
describe('Initialization', () => {
|
||||
test('should create instance with default config', () => {
|
||||
const instance = new SpeechToText({ modelPath: '/models/whisper', modelType: 'base.en' });
|
||||
expect(instance instanceof EventEmitter).toBe(true);
|
||||
expect(instance instanceof SpeechToText).toBe(true);
|
||||
const config: SpeechToTextConfig = {
|
||||
modelPath: '/test/model',
|
||||
modelType: 'base.en'
|
||||
};
|
||||
const instance = new SpeechToText(config);
|
||||
expect(instance).toBeDefined();
|
||||
});
|
||||
|
||||
test('should initialize successfully', async () => {
|
||||
const initSpy = spyOn(speechToText, 'initialize');
|
||||
await speechToText.initialize();
|
||||
expect(initSpy).toHaveBeenCalled();
|
||||
const result = await speechToText.initialize();
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
|
||||
test('should not initialize twice', async () => {
|
||||
await speechToText.initialize();
|
||||
const initSpy = spyOn(speechToText, 'initialize');
|
||||
await speechToText.initialize();
|
||||
expect(initSpy.mock.calls.length).toBe(1);
|
||||
const result = await speechToText.initialize();
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Health Check', () => {
|
||||
test('should return true when Docker container is running', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.emtest('data', Buffer.from('Up 2 hours'));
|
||||
mockProcess.stdout.emit('data', Buffer.from('Up 2 hours'));
|
||||
}, 0);
|
||||
|
||||
const result = await speechToText.checkHealth();
|
||||
@@ -83,23 +81,20 @@ describe('SpeechToText', () => {
|
||||
});
|
||||
|
||||
test('should return false when Docker container is not running', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(1), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.emit('data', Buffer.from('No containers found'));
|
||||
}, 0);
|
||||
|
||||
const result = await speechToText.checkHealth();
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle Docker command errors', async () => {
|
||||
spawnMock.mockImplementation(() => {
|
||||
throw new Error('Docker not found');
|
||||
});
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stderr.emit('data', Buffer.from('Docker error'));
|
||||
}, 0);
|
||||
|
||||
const result = await speechToText.checkHealth();
|
||||
expect(result).toBe(false);
|
||||
@@ -108,51 +103,48 @@ describe('SpeechToText', () => {
|
||||
|
||||
describe('Wake Word Detection', () => {
|
||||
test('should detect wake word and emit event', async () => {
|
||||
const testFile = path.join(testAudioDir, 'wake_word_test_123456.wav');
|
||||
const testMetadata = `${testFile}.json`;
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.emit('data', Buffer.from('Wake word detected'));
|
||||
}, 0);
|
||||
|
||||
return new Promise<void>((resolve) => {
|
||||
speechToText.startWakeWordDetection(testAudioDir);
|
||||
|
||||
speechToText.on('wake_word', (event: WakeWordEvent) => {
|
||||
expect(event).toBeDefined();
|
||||
expect(event.audioFile).toBe(testFile);
|
||||
expect(event.metadataFile).toBe(testMetadata);
|
||||
expect(event.timestamp).toBe('123456');
|
||||
const wakeWordPromise = new Promise<void>((resolve) => {
|
||||
speechToText.on('wake_word', () => {
|
||||
resolve();
|
||||
});
|
||||
|
||||
// Create a test audio file to trigger the event
|
||||
fs.writeFileSync(testFile, 'test audio content');
|
||||
});
|
||||
|
||||
speechToText.startWakeWordDetection();
|
||||
await wakeWordPromise;
|
||||
});
|
||||
|
||||
test('should handle non-wake-word files', async () => {
|
||||
const testFile = path.join(testAudioDir, 'regular_audio.wav');
|
||||
let eventEmitted = false;
|
||||
|
||||
return new Promise<void>((resolve) => {
|
||||
speechToText.startWakeWordDetection(testAudioDir);
|
||||
|
||||
speechToText.on('wake_word', () => {
|
||||
eventEmitted = true;
|
||||
});
|
||||
|
||||
fs.writeFileSync(testFile, 'test audio content');
|
||||
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
expect(eventEmitted).toBe(false);
|
||||
mockProcess.stdout.emit('data', Buffer.from('Processing audio'));
|
||||
}, 0);
|
||||
|
||||
const wakeWordPromise = new Promise<void>((resolve, reject) => {
|
||||
const timeout = setTimeout(() => {
|
||||
resolve();
|
||||
}, 100);
|
||||
|
||||
speechToText.on('wake_word', () => {
|
||||
clearTimeout(timeout);
|
||||
reject(new Error('Wake word should not be detected'));
|
||||
});
|
||||
});
|
||||
|
||||
speechToText.startWakeWordDetection();
|
||||
await wakeWordPromise;
|
||||
});
|
||||
});
|
||||
|
||||
describe('Audio Transcription', () => {
|
||||
const mockTranscriptionResult: TranscriptionResult = {
|
||||
text: 'Hello world',
|
||||
const mockTranscriptionResult = {
|
||||
text: 'Test transcription',
|
||||
segments: [{
|
||||
text: 'Hello world',
|
||||
text: 'Test transcription',
|
||||
start: 0,
|
||||
end: 1,
|
||||
confidence: 0.95
|
||||
@@ -160,169 +152,100 @@ describe('SpeechToText', () => {
|
||||
};
|
||||
|
||||
test('should transcribe audio successfully', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
|
||||
const transcriptionPromise = speechToText.transcribeAudio('/test/audio.wav');
|
||||
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.emtest('data', Buffer.from(JSON.stringify(mockTranscriptionResult)));
|
||||
mockProcess.stdout.emit('data', Buffer.from(JSON.stringify(mockTranscriptionResult)));
|
||||
}, 0);
|
||||
|
||||
const result = await transcriptionPromise;
|
||||
const result = await speechToText.transcribeAudio('/test/audio.wav');
|
||||
expect(result).toEqual(mockTranscriptionResult);
|
||||
});
|
||||
|
||||
test('should handle transcription errors', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(1), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
|
||||
const transcriptionPromise = speechToText.transcribeAudio('/test/audio.wav');
|
||||
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stderr.emtest('data', Buffer.from('Transcription failed'));
|
||||
mockProcess.stderr.emit('data', Buffer.from('Transcription failed'));
|
||||
}, 0);
|
||||
|
||||
await expect(transcriptionPromise).rejects.toThrow(TranscriptionError);
|
||||
await expect(speechToText.transcribeAudio('/test/audio.wav')).rejects.toThrow(TranscriptionError);
|
||||
});
|
||||
|
||||
test('should handle invalid JSON output', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
|
||||
const transcriptionPromise = speechToText.transcribeAudio('/test/audio.wav');
|
||||
|
||||
// Setup mock process
|
||||
setTimeout(() => {
|
||||
mockProcess.stdout.emtest('data', Buffer.from('Invalid JSON'));
|
||||
mockProcess.stdout.emit('data', Buffer.from('Invalid JSON'));
|
||||
}, 0);
|
||||
|
||||
await expect(transcriptionPromise).rejects.toThrow(TranscriptionError);
|
||||
await expect(speechToText.transcribeAudio('/test/audio.wav')).rejects.toThrow(TranscriptionError);
|
||||
});
|
||||
|
||||
test('should pass correct transcription options', async () => {
|
||||
const options: TranscriptionOptions = {
|
||||
model: 'large-v2',
|
||||
model: 'base.en',
|
||||
language: 'en',
|
||||
temperature: 0.5,
|
||||
beamSize: 3,
|
||||
patience: 2,
|
||||
device: 'cuda'
|
||||
temperature: 0,
|
||||
beamSize: 5,
|
||||
patience: 1,
|
||||
device: 'cpu'
|
||||
};
|
||||
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
await speechToText.transcribeAudio('/test/audio.wav', options);
|
||||
|
||||
const transcriptionPromise = speechToText.transcribeAudio('/test/audio.wav', options);
|
||||
|
||||
const expectedArgs = [
|
||||
'exec',
|
||||
mockConfig.containerName,
|
||||
'fast-whisper',
|
||||
'--model', options.model,
|
||||
'--language', options.language,
|
||||
'--temperature', String(options.temperature ?? 0),
|
||||
'--beam-size', String(options.beamSize ?? 5),
|
||||
'--patience', String(options.patience ?? 1),
|
||||
'--device', options.device
|
||||
].filter((arg): arg is string => arg !== undefined);
|
||||
|
||||
const mockCalls = spawnMock.mock.calls;
|
||||
expect(mockCalls.length).toBe(1);
|
||||
const [cmd, args] = mockCalls[0].args;
|
||||
expect(cmd).toBe('docker');
|
||||
expect(expectedArgs.every(arg => args.includes(arg))).toBe(true);
|
||||
|
||||
await transcriptionPromise.catch(() => { });
|
||||
const spawnArgs = spawnMock.mock.calls[0]?.args[1] || [];
|
||||
expect(spawnArgs).toContain('--model');
|
||||
expect(spawnArgs).toContain(options.model);
|
||||
expect(spawnArgs).toContain('--language');
|
||||
expect(spawnArgs).toContain(options.language);
|
||||
expect(spawnArgs).toContain('--temperature');
|
||||
expect(spawnArgs).toContain(options.temperature?.toString());
|
||||
expect(spawnArgs).toContain('--beam-size');
|
||||
expect(spawnArgs).toContain(options.beamSize?.toString());
|
||||
expect(spawnArgs).toContain('--patience');
|
||||
expect(spawnArgs).toContain(options.patience?.toString());
|
||||
expect(spawnArgs).toContain('--device');
|
||||
expect(spawnArgs).toContain(options.device);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Event Handling', () => {
|
||||
test('should emit progress events', async () => {
|
||||
const mockProcess = {
|
||||
stdout: new EventEmitter(),
|
||||
stderr: new EventEmitter(),
|
||||
on: (event: string, cb: (code: number) => void) => {
|
||||
if (event === 'close') setTimeout(() => cb(0), 0);
|
||||
}
|
||||
};
|
||||
spawnMock.mockImplementation(() => mockProcess);
|
||||
|
||||
return new Promise<void>((resolve) => {
|
||||
const progressEvents: any[] = [];
|
||||
speechToText.on('progress', (event) => {
|
||||
progressEvents.push(event);
|
||||
if (progressEvents.length === 2) {
|
||||
expect(progressEvents).toEqual([
|
||||
{ type: 'stdout', data: 'Processing' },
|
||||
{ type: 'stderr', data: 'Loading model' }
|
||||
]);
|
||||
const progressPromise = new Promise<void>((resolve) => {
|
||||
speechToText.on('progress', (progress) => {
|
||||
expect(progress).toEqual({ type: 'stdout', data: 'Processing' });
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
void speechToText.transcribeAudio('/test/audio.wav');
|
||||
|
||||
mockProcess.stdout.emtest('data', Buffer.from('Processing'));
|
||||
mockProcess.stderr.emtest('data', Buffer.from('Loading model'));
|
||||
});
|
||||
const transcribePromise = speechToText.transcribeAudio('/test/audio.wav');
|
||||
mockProcess.stdout.emit('data', Buffer.from('Processing'));
|
||||
await Promise.all([transcribePromise.catch(() => { }), progressPromise]);
|
||||
});
|
||||
|
||||
test('should emit error events', async () => {
|
||||
return new Promise<void>((resolve) => {
|
||||
const errorPromise = new Promise<void>((resolve) => {
|
||||
speechToText.on('error', (error) => {
|
||||
expect(error instanceof Error).toBe(true);
|
||||
expect(error.message).toBe('Test error');
|
||||
resolve();
|
||||
});
|
||||
|
||||
speechToText.emtest('error', new Error('Test error'));
|
||||
});
|
||||
|
||||
speechToText.emit('error', new Error('Test error'));
|
||||
await errorPromise;
|
||||
});
|
||||
});
|
||||
|
||||
describe('Cleanup', () => {
|
||||
test('should stop wake word detection', () => {
|
||||
speechToText.startWakeWordDetection(testAudioDir);
|
||||
speechToText.startWakeWordDetection();
|
||||
speechToText.stopWakeWordDetection();
|
||||
// Verify no more file watching events are processed
|
||||
const testFile = path.join(testAudioDir, 'wake_word_test_123456.wav');
|
||||
let eventEmitted = false;
|
||||
speechToText.on('wake_word', () => {
|
||||
eventEmitted = true;
|
||||
});
|
||||
fs.writeFileSync(testFile, 'test audio content');
|
||||
expect(eventEmitted).toBe(false);
|
||||
expect(mockProcess.kill.mock.calls.length).toBe(1);
|
||||
});
|
||||
|
||||
test('should clean up resources on shutdown', async () => {
|
||||
await speechToText.initialize();
|
||||
const shutdownSpy = spyOn(speechToText, 'shutdown');
|
||||
await speechToText.shutdown();
|
||||
expect(shutdownSpy).toHaveBeenCalled();
|
||||
expect(mockProcess.kill.mock.calls.length).toBe(1);
|
||||
});
|
||||
});
|
||||
});
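The transcription tests drive a `docker exec <container> fast-whisper ...` process through Bun.spawn and parse its stdout as JSON. A stripped-down version of that flow, with flags copied from the expected-args list above; the real implementation in src/speech/speechToText.ts may differ:

// Sketch: run fast-whisper inside the container and parse its JSON output.
async function transcribeOnce(containerName: string, audioPath: string): Promise<unknown> {
    const proc = Bun.spawn(
        [
            'docker', 'exec', containerName, 'fast-whisper',
            '--model', 'base.en',
            '--language', 'en',
            '--device', 'cpu',
            audioPath
        ],
        { stdout: 'pipe', stderr: 'pipe' }
    );

    const [stdout, exitCode] = await Promise.all([
        new Response(proc.stdout).text(),
        proc.exited
    ]);

    if (exitCode !== 0) {
        throw new Error(`fast-whisper exited with code ${exitCode}`);
    }
    return JSON.parse(stdout); // { text, segments: [...] } in the fixtures above
}

// await transcribeOnce('test-container', '/test/audio.wav');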
|
||||
@@ -1,120 +1,177 @@
|
||||
import { describe, expect, test } from "bun:test";
|
||||
import { jest, describe, it, expect, beforeEach, afterEach } from '@jest/globals';
|
||||
import { HassWebSocketClient } from '../../src/websocket/client.js';
|
||||
import WebSocket from 'ws';
|
||||
import { EventEmitter } from 'events';
|
||||
import * as HomeAssistant from '../../src/types/hass.js';
|
||||
|
||||
// Mock WebSocket
|
||||
// // jest.mock('ws');
|
||||
import { describe, expect, test, beforeEach, afterEach, mock } from "bun:test";
|
||||
import { EventEmitter } from "events";
|
||||
import { HassWebSocketClient } from "../../src/websocket/client";
|
||||
import type { MessageEvent, ErrorEvent } from "ws";
|
||||
import { Mock, fn as jestMock } from 'jest-mock';
|
||||
import { expect as jestExpect } from '@jest/globals';
|
||||
|
||||
describe('WebSocket Event Handling', () => {
|
||||
let client: HassWebSocketClient;
|
||||
let mockWebSocket: jest.Mocked<WebSocket>;
|
||||
let mockWebSocket: any;
|
||||
let onOpenCallback: () => void;
|
||||
let onCloseCallback: () => void;
|
||||
let onErrorCallback: (event: any) => void;
|
||||
let onMessageCallback: (event: any) => void;
|
||||
let eventEmitter: EventEmitter;
|
||||
|
||||
beforeEach(() => {
|
||||
// Clear all mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Create event emitter for mocking WebSocket events
|
||||
eventEmitter = new EventEmitter();
|
||||
|
||||
// Create mock WebSocket instance
|
||||
// Initialize callbacks first
|
||||
onOpenCallback = () => { };
|
||||
onCloseCallback = () => { };
|
||||
onErrorCallback = () => { };
|
||||
onMessageCallback = () => { };
|
||||
|
||||
mockWebSocket = {
|
||||
on: jest.fn((event: string, listener: (...args: any[]) => void) => {
|
||||
eventEmitter.on(event, listener);
|
||||
return mockWebSocket;
|
||||
}),
|
||||
send: mock(),
|
||||
close: mock(),
|
||||
readyState: WebSocket.OPEN,
|
||||
removeAllListeners: mock(),
|
||||
// Add required WebSocket properties
|
||||
binaryType: 'arraybuffer',
|
||||
bufferedAmount: 0,
|
||||
extensions: '',
|
||||
protocol: '',
|
||||
url: 'ws://test.com',
|
||||
isPaused: () => false,
|
||||
ping: mock(),
|
||||
pong: mock(),
|
||||
terminate: mock()
|
||||
} as unknown as jest.Mocked<WebSocket>;
|
||||
readyState: 1,
|
||||
OPEN: 1,
|
||||
onopen: null,
|
||||
onclose: null,
|
||||
onerror: null,
|
||||
onmessage: null
|
||||
};
|
||||
|
||||
// Mock WebSocket constructor
|
||||
(WebSocket as unknown as jest.Mock).mockImplementation(() => mockWebSocket);
|
||||
// Define setters that store the callbacks
|
||||
Object.defineProperties(mockWebSocket, {
|
||||
onopen: {
|
||||
get() { return onOpenCallback; },
|
||||
set(callback: () => void) { onOpenCallback = callback; }
|
||||
},
|
||||
onclose: {
|
||||
get() { return onCloseCallback; },
|
||||
set(callback: () => void) { onCloseCallback = callback; }
|
||||
},
|
||||
onerror: {
|
||||
get() { return onErrorCallback; },
|
||||
set(callback: (event: any) => void) { onErrorCallback = callback; }
|
||||
},
|
||||
onmessage: {
|
||||
get() { return onMessageCallback; },
|
||||
set(callback: (event: any) => void) { onMessageCallback = callback; }
|
||||
}
|
||||
});
|
||||
|
||||
// Create client instance
|
||||
client = new HassWebSocketClient('ws://test.com', 'test-token');
|
||||
// @ts-expect-error - Mock WebSocket implementation
|
||||
global.WebSocket = mock(() => mockWebSocket);
|
||||
|
||||
client = new HassWebSocketClient('ws://localhost:8123/api/websocket', 'test-token');
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (eventEmitter) {
|
||||
eventEmitter.removeAllListeners();
|
||||
}
|
||||
if (client) {
|
||||
client.disconnect();
|
||||
}
|
||||
});
|
||||
|
||||
test('should handle connection events', () => {
|
||||
// Simulate open event
|
||||
eventEmitter.emtest('open');
|
||||
|
||||
// Verify authentication message was sent
|
||||
expect(mockWebSocket.send).toHaveBeenCalledWith(
|
||||
expect.stringContaining('"type":"auth"')
|
||||
);
|
||||
test('should handle connection events', async () => {
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
await connectPromise;
|
||||
expect(client.isConnected()).toBe(true);
|
||||
});
|
||||
|
||||
test('should handle authentication response', () => {
|
||||
// Simulate auth_ok message
|
||||
eventEmitter.emtest('message', JSON.stringify({ type: 'auth_ok' }));
|
||||
test('should handle authentication response', async () => {
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
|
||||
// Verify client is ready for commands
|
||||
expect(mockWebSocket.readyState).toBe(WebSocket.OPEN);
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_required'
|
||||
})
|
||||
});
|
||||
|
||||
test('should handle auth failure', () => {
|
||||
// Simulate auth_invalid message
|
||||
eventEmitter.emtest('message', JSON.stringify({
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_ok'
|
||||
})
|
||||
});
|
||||
|
||||
await connectPromise;
|
||||
expect(client.isAuthenticated()).toBe(true);
|
||||
});
|
||||
|
||||
test('should handle auth failure', async () => {
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_required'
|
||||
})
|
||||
});
|
||||
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_invalid',
|
||||
message: 'Invalid token'
|
||||
}));
|
||||
|
||||
// Verify client attempts to close connection
|
||||
expect(mockWebSocket.close).toHaveBeenCalled();
|
||||
message: 'Invalid password'
|
||||
})
|
||||
});
|
||||
|
||||
test('should handle connection errors', () => {
|
||||
// Create error spy
|
||||
const errorSpy = mock();
|
||||
client.on('error', errorSpy);
|
||||
|
||||
// Simulate error
|
||||
const testError = new Error('Test error');
|
||||
eventEmitter.emtest('error', testError);
|
||||
|
||||
// Verify error was handled
|
||||
expect(errorSpy).toHaveBeenCalledWith(testError);
|
||||
await expect(connectPromise).rejects.toThrow('Authentication failed');
|
||||
expect(client.isAuthenticated()).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle disconnection', () => {
|
||||
// Create close spy
|
||||
const closeSpy = mock();
|
||||
client.on('close', closeSpy);
|
||||
|
||||
// Simulate close
|
||||
eventEmitter.emtest('close');
|
||||
|
||||
// Verify close was handled
|
||||
expect(closeSpy).toHaveBeenCalled();
|
||||
test('should handle connection errors', async () => {
|
||||
const errorPromise = new Promise((resolve) => {
|
||||
client.once('error', resolve);
|
||||
});
|
||||
|
||||
test('should handle event messages', () => {
|
||||
// Create event spy
|
||||
const eventSpy = mock();
|
||||
client.on('event', eventSpy);
|
||||
const connectPromise = client.connect().catch(() => { /* Expected error */ });
|
||||
onOpenCallback();
|
||||
|
||||
const errorEvent = new Error('Connection failed');
|
||||
onErrorCallback({ error: errorEvent });
|
||||
|
||||
const error = await errorPromise;
|
||||
expect(error instanceof Error).toBe(true);
|
||||
expect((error as Error).message).toBe('Connection failed');
|
||||
});
|
||||
|
||||
test('should handle disconnection', async () => {
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
await connectPromise;
|
||||
|
||||
const disconnectPromise = new Promise((resolve) => {
|
||||
client.on('disconnected', resolve);
|
||||
});
|
||||
|
||||
onCloseCallback();
|
||||
|
||||
await disconnectPromise;
|
||||
expect(client.isConnected()).toBe(false);
|
||||
});
|
||||
|
||||
test('should handle event messages', async () => {
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_required'
|
||||
})
|
||||
});
|
||||
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_ok'
|
||||
})
|
||||
});
|
||||
|
||||
await connectPromise;
|
||||
|
||||
const eventPromise = new Promise((resolve) => {
|
||||
client.on('state_changed', resolve);
|
||||
});
|
||||
|
||||
// Simulate event message
|
||||
const eventData = {
|
||||
id: 1,
|
||||
type: 'event',
|
||||
event: {
|
||||
event_type: 'state_changed',
|
||||
@@ -124,217 +181,63 @@ describe('WebSocket Event Handling', () => {
|
||||
}
|
||||
}
|
||||
};
|
||||
eventEmitter.emtest('message', JSON.stringify(eventData));
|
||||
|
||||
// Verify event was handled
|
||||
expect(eventSpy).toHaveBeenCalledWith(eventData.event);
|
||||
onMessageCallback({
|
||||
data: JSON.stringify(eventData)
|
||||
});
|
||||
|
||||
describe('Connection Events', () => {
|
||||
test('should handle successful connection', (done) => {
|
||||
client.on('open', () => {
|
||||
expect(mockWebSocket.send).toHaveBeenCalled();
|
||||
done();
|
||||
});
|
||||
|
||||
eventEmitter.emtest('open');
|
||||
});
|
||||
|
||||
test('should handle connection errors', (done) => {
|
||||
const error = new Error('Connection failed');
|
||||
client.on('error', (err: Error) => {
|
||||
expect(err).toBe(error);
|
||||
done();
|
||||
});
|
||||
|
||||
eventEmitter.emtest('error', error);
|
||||
});
|
||||
|
||||
test('should handle connection close', (done) => {
|
||||
client.on('disconnected', () => {
|
||||
expect(mockWebSocket.close).toHaveBeenCalled();
|
||||
done();
|
||||
});
|
||||
|
||||
eventEmitter.emtest('close');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Authentication', () => {
|
||||
test('should send authentication message on connect', () => {
|
||||
const authMessage: HomeAssistant.AuthMessage = {
|
||||
type: 'auth',
|
||||
access_token: 'test_token'
|
||||
};
|
||||
|
||||
client.connect();
|
||||
expect(mockWebSocket.send).toHaveBeenCalledWith(JSON.stringify(authMessage));
|
||||
});
|
||||
|
||||
test('should handle successful authentication', (done) => {
|
||||
client.on('auth_ok', () => {
|
||||
done();
|
||||
});
|
||||
|
||||
client.connect();
|
||||
eventEmitter.emtest('message', JSON.stringify({ type: 'auth_ok' }));
|
||||
});
|
||||
|
||||
test('should handle authentication failure', (done) => {
|
||||
client.on('auth_invalid', () => {
|
||||
done();
|
||||
});
|
||||
|
||||
client.connect();
|
||||
eventEmitter.emtest('message', JSON.stringify({ type: 'auth_invalid' }));
|
||||
});
|
||||
});
|
||||
|
||||
describe('Event Subscription', () => {
|
||||
test('should handle state changed events', (done) => {
|
||||
const stateEvent: HomeAssistant.StateChangedEvent = {
|
||||
event_type: 'state_changed',
|
||||
data: {
|
||||
entity_id: 'light.living_room',
|
||||
new_state: {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on',
|
||||
attributes: { brightness: 255 },
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
}
|
||||
},
|
||||
old_state: {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'off',
|
||||
attributes: {},
|
||||
last_changed: '2024-01-01T00:00:00Z',
|
||||
last_updated: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '122',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
}
|
||||
}
|
||||
},
|
||||
origin: 'LOCAL',
|
||||
time_fired: '2024-01-01T00:00:00Z',
|
||||
context: {
|
||||
id: '123',
|
||||
parent_id: null,
|
||||
user_id: null
|
||||
}
|
||||
};
|
||||
|
||||
client.on('event', (event) => {
|
||||
expect(event.data.entity_id).toBe('light.living_room');
|
||||
expect(event.data.new_state.state).toBe('on');
|
||||
expect(event.data.old_state.state).toBe('off');
|
||||
done();
|
||||
});
|
||||
|
||||
eventEmitter.emtest('message', JSON.stringify({ type: 'event', event: stateEvent }));
|
||||
const receivedEvent = await eventPromise;
|
||||
expect(receivedEvent).toEqual(eventData.event.data);
|
||||
});
|
||||
|
||||
test('should subscribe to specific events', async () => {
|
||||
const subscriptionId = 1;
|
||||
const callback = mock();
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
|
||||
// Mock successful subscription
|
||||
const subscribePromise = client.subscribeEvents('state_changed', callback);
|
||||
eventEmitter.emtest('message', JSON.stringify({
|
||||
id: 1,
|
||||
type: 'result',
|
||||
success: true
|
||||
}));
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_required'
|
||||
})
|
||||
});
|
||||
|
||||
await expect(subscribePromise).resolves.toBe(subscriptionId);
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_ok'
|
||||
})
|
||||
});
|
||||
|
||||
// Test event handling
|
||||
const eventData = {
|
||||
entity_id: 'light.living_room',
|
||||
state: 'on'
|
||||
};
|
||||
eventEmitter.emtest('message', JSON.stringify({
|
||||
type: 'event',
|
||||
event: {
|
||||
event_type: 'state_changed',
|
||||
data: eventData
|
||||
}
|
||||
}));
|
||||
await connectPromise;
|
||||
|
||||
expect(callback).toHaveBeenCalledWith(eventData);
|
||||
const subscriptionId = await client.subscribeEvents('state_changed', (data) => {
|
||||
// Empty callback for type satisfaction
|
||||
});
|
||||
expect(mockWebSocket.send).toHaveBeenCalled();
|
||||
expect(subscriptionId).toBeDefined();
|
||||
});
|
||||
|
||||
test('should unsubscribe from events', async () => {
|
||||
// First subscribe
|
||||
const subscriptionId = await client.subscribeEvents('state_changed', () => { });
|
||||
const connectPromise = client.connect();
|
||||
onOpenCallback();
|
||||
|
||||
// Then unsubscribe
|
||||
const unsubscribePromise = client.unsubscribeEvents(subscriptionId);
|
||||
eventEmitter.emtest('message', JSON.stringify({
|
||||
id: 2,
|
||||
type: 'result',
|
||||
success: true
|
||||
}));
|
||||
|
||||
await expect(unsubscribePromise).resolves.toBeUndefined();
|
||||
});
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_required'
|
||||
})
|
||||
});
|
||||
|
||||
describe('Message Handling', () => {
|
||||
test('should handle malformed messages', (done) => {
|
||||
client.on('error', (error: Error) => {
|
||||
expect(error.message).toContain('Unexpected token');
|
||||
done();
|
||||
onMessageCallback({
|
||||
data: JSON.stringify({
|
||||
type: 'auth_ok'
|
||||
})
|
||||
});
|
||||
|
||||
eventEmitter.emtest('message', 'invalid json');
|
||||
});
|
||||
await connectPromise;
|
||||
|
||||
test('should handle unknown message types', (done) => {
|
||||
const unknownMessage = {
|
||||
type: 'unknown_type',
|
||||
data: {}
|
||||
};
|
||||
|
||||
client.on('error', (error: Error) => {
|
||||
expect(error.message).toContain('Unknown message type');
|
||||
done();
|
||||
const subscriptionId = await client.subscribeEvents('state_changed', (data) => {
|
||||
// Empty callback for type satisfaction
|
||||
});
|
||||
await client.unsubscribeEvents(subscriptionId);
|
||||
|
||||
eventEmitter.emtest('message', JSON.stringify(unknownMessage));
|
||||
});
|
||||
});
|
||||
|
||||
describe('Reconnection', () => {
|
||||
test('should attempt to reconnect on connection loss', (done) => {
|
||||
let reconnectAttempts = 0;
|
||||
client.on('disconnected', () => {
|
||||
reconnectAttempts++;
|
||||
if (reconnectAttempts === 1) {
|
||||
expect(WebSocket).toHaveBeenCalledTimes(2);
|
||||
done();
|
||||
}
|
||||
});
|
||||
|
||||
eventEmitter.emtest('close');
|
||||
});
|
||||
|
||||
test('should re-authenticate after reconnection', (done) => {
|
||||
client.connect();
|
||||
|
||||
client.on('auth_ok', () => {
|
||||
done();
|
||||
});
|
||||
|
||||
eventEmitter.emtest('close');
|
||||
eventEmitter.emtest('open');
|
||||
eventEmitter.emtest('message', JSON.stringify({ type: 'auth_ok' }));
|
||||
});
|
||||
expect(mockWebSocket.send).toHaveBeenCalled();
|
||||
});
|
||||
});
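Taken together, the new assertions describe a connect, auth_required, auth, auth_ok handshake followed by event subscriptions. Typical usage of the client under test might look like this; a sketch based only on the methods exercised above:

// Sketch: the client lifecycle these tests cover.
import { HassWebSocketClient } from "../../src/websocket/client";

const client = new HassWebSocketClient('ws://localhost:8123/api/websocket', 'test-token');

client.on('error', (err: Error) => console.error('websocket error:', err.message));
client.on('disconnected', () => console.log('connection closed'));

await client.connect(); // resolves once auth_ok is received

const subscriptionId = await client.subscribeEvents('state_changed', (data) => {
    console.log('state changed:', data);
});

// ...later
await client.unsubscribeEvents(subscriptionId);
client.disconnect();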
|
||||
604
bun.lock
@@ -1,604 +0,0 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"dependencies": {
|
||||
"@elysiajs/cors": "^1.2.0",
|
||||
"@elysiajs/swagger": "^1.2.0",
|
||||
"@types/jsonwebtoken": "^9.0.5",
|
||||
"@types/node": "^20.11.24",
|
||||
"@types/sanitize-html": "^2.9.5",
|
||||
"@types/ws": "^8.5.10",
|
||||
"@xmldom/xmldom": "^0.9.7",
|
||||
"dotenv": "^16.4.5",
|
||||
"elysia": "^1.2.11",
|
||||
"helmet": "^7.1.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"node-fetch": "^3.3.2",
|
||||
"openai": "^4.82.0",
|
||||
"sanitize-html": "^2.11.0",
|
||||
"typescript": "^5.3.3",
|
||||
"winston": "^3.11.0",
|
||||
"winston-daily-rotate-file": "^5.0.0",
|
||||
"ws": "^8.16.0",
|
||||
"zod": "^3.22.4",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^7.1.0",
|
||||
"@typescript-eslint/parser": "^7.1.0",
|
||||
"bun-types": "^1.2.2",
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-config-prettier": "^9.1.0",
|
||||
"eslint-plugin-prettier": "^5.1.3",
|
||||
"husky": "^9.0.11",
|
||||
"prettier": "^3.2.5",
|
||||
"supertest": "^6.3.3",
|
||||
"uuid": "^11.0.5",
|
||||
},
|
||||
},
|
||||
},
|
||||
"packages": {
|
||||
"@colors/colors": ["@colors/colors@1.6.0", "", {}, "sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA=="],
|
||||
|
||||
"@dabh/diagnostics": ["@dabh/diagnostics@2.0.3", "", { "dependencies": { "colorspace": "1.1.x", "enabled": "2.0.x", "kuler": "^2.0.0" } }, "sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA=="],
|
||||
|
||||
"@elysiajs/cors": ["@elysiajs/cors@1.2.0", "", { "peerDependencies": { "elysia": ">= 1.2.0" } }, "sha512-qsJwDAg6WfdQRMfj6uSMcDPSpXvm/zQFeAX1uuJXhIgazH8itSfcDxcH9pMuXVRX1yQNi2pPwNQLJmAcw5mzvw=="],
|
||||
|
||||
"@elysiajs/swagger": ["@elysiajs/swagger@1.2.0", "", { "dependencies": { "@scalar/themes": "^0.9.52", "@scalar/types": "^0.0.12", "openapi-types": "^12.1.3", "pathe": "^1.1.2" }, "peerDependencies": { "elysia": ">= 1.2.0" } }, "sha512-OPx93DP6rM2VHjA3D44Xiz5MYm9AYlO2NGWPsnSsdyvaOCiL9wJj529583h7arX4iIEYE5LiLB0/A45unqbopw=="],
|
||||
|
||||
"@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.4.1", "", { "dependencies": { "eslint-visitor-keys": "^3.4.3" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA=="],
|
||||
|
||||
"@eslint-community/regexpp": ["@eslint-community/regexpp@4.12.1", "", {}, "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ=="],
|
||||
|
||||
"@eslint/eslintrc": ["@eslint/eslintrc@2.1.4", "", { "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", "espree": "^9.6.0", "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.0", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" } }, "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ=="],
|
||||
|
||||
"@eslint/js": ["@eslint/js@8.57.1", "", {}, "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q=="],
|
||||
|
||||
"@humanwhocodes/config-array": ["@humanwhocodes/config-array@0.13.0", "", { "dependencies": { "@humanwhocodes/object-schema": "^2.0.3", "debug": "^4.3.1", "minimatch": "^3.0.5" } }, "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw=="],
|
||||
|
||||
"@humanwhocodes/module-importer": ["@humanwhocodes/module-importer@1.0.1", "", {}, "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA=="],
|
||||
|
||||
"@humanwhocodes/object-schema": ["@humanwhocodes/object-schema@2.0.3", "", {}, "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA=="],
|
||||
|
||||
"@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="],
|
||||
|
||||
"@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="],
|
||||
|
||||
"@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="],
|
||||
|
||||
"@pkgr/core": ["@pkgr/core@0.1.1", "", {}, "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA=="],
|
||||
|
||||
"@scalar/openapi-types": ["@scalar/openapi-types@0.1.1", "", {}, "sha512-NMy3QNk6ytcCoPUGJH0t4NNr36OWXgZhA3ormr3TvhX1NDgoF95wFyodGVH8xiHeUyn2/FxtETm8UBLbB5xEmg=="],
|
||||
|
||||
"@scalar/themes": ["@scalar/themes@0.9.64", "", { "dependencies": { "@scalar/types": "0.0.30" } }, "sha512-hr9bCTdH9M/N8w31Td+IJVtbH+v0Ej31myW8QWhUfwYZe5qS815Tl1mp+qWFaObstOw5VX3zOtiZuuhF1zMIyw=="],
|
||||
|
||||
"@scalar/types": ["@scalar/types@0.0.12", "", { "dependencies": { "@scalar/openapi-types": "0.1.1", "@unhead/schema": "^1.9.5" } }, "sha512-XYZ36lSEx87i4gDqopQlGCOkdIITHHEvgkuJFrXFATQs9zHARop0PN0g4RZYWj+ZpCUclOcaOjbCt8JGe22mnQ=="],
|
||||
|
||||
"@sinclair/typebox": ["@sinclair/typebox@0.34.15", "", {}, "sha512-xeIzl3h1Znn9w/LTITqpiwag0gXjA+ldi2ZkXIBxGEppGCW211Tza+eL6D4pKqs10bj5z2umBWk5WL6spQ2OCQ=="],
|
||||
|
||||
"@types/jsonwebtoken": ["@types/jsonwebtoken@9.0.8", "", { "dependencies": { "@types/ms": "*", "@types/node": "*" } }, "sha512-7fx54m60nLFUVYlxAB1xpe9CBWX2vSrk50Y6ogRJ1v5xxtba7qXTg5BgYDN5dq+yuQQ9HaVlHJyAAt1/mxryFg=="],
|
||||
|
||||
"@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
|
||||
|
||||
"@types/node": ["@types/node@20.17.17", "", { "dependencies": { "undici-types": "~6.19.2" } }, "sha512-/WndGO4kIfMicEQLTi/mDANUu/iVUhT7KboZPdEqqHQ4aTS+3qT3U5gIqWDFV+XouorjfgGqvKILJeHhuQgFYg=="],
|
||||
|
||||
"@types/node-fetch": ["@types/node-fetch@2.6.12", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.0" } }, "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA=="],
|
||||
|
||||
"@types/sanitize-html": ["@types/sanitize-html@2.13.0", "", { "dependencies": { "htmlparser2": "^8.0.0" } }, "sha512-X31WxbvW9TjIhZZNyNBZ/p5ax4ti7qsNDBDEnH4zAgmEh35YnFD1UiS6z9Cd34kKm0LslFW0KPmTQzu/oGtsqQ=="],
|
||||
|
||||
"@types/triple-beam": ["@types/triple-beam@1.3.5", "", {}, "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw=="],
|
||||
|
||||
"@types/uuid": ["@types/uuid@10.0.0", "", {}, "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ=="],
|
||||
|
||||
"@types/ws": ["@types/ws@8.5.14", "", { "dependencies": { "@types/node": "*" } }, "sha512-bd/YFLW+URhBzMXurx7lWByOu+xzU9+kb3RboOteXYDfW+tr+JZa99OyNmPINEGB/ahzKrEuc8rcv4gnpJmxTw=="],
|
||||
|
||||
"@typescript-eslint/eslint-plugin": ["@typescript-eslint/eslint-plugin@7.18.0", "", { "dependencies": { "@eslint-community/regexpp": "^4.10.0", "@typescript-eslint/scope-manager": "7.18.0", "@typescript-eslint/type-utils": "7.18.0", "@typescript-eslint/utils": "7.18.0", "@typescript-eslint/visitor-keys": "7.18.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", "ts-api-utils": "^1.3.0" }, "peerDependencies": { "@typescript-eslint/parser": "^7.0.0", "eslint": "^8.56.0" } }, "sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw=="],
|
||||
|
||||
"@typescript-eslint/parser": ["@typescript-eslint/parser@7.18.0", "", { "dependencies": { "@typescript-eslint/scope-manager": "7.18.0", "@typescript-eslint/types": "7.18.0", "@typescript-eslint/typescript-estree": "7.18.0", "@typescript-eslint/visitor-keys": "7.18.0", "debug": "^4.3.4" }, "peerDependencies": { "eslint": "^8.56.0" } }, "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg=="],
|
||||
|
||||
"@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@7.18.0", "", { "dependencies": { "@typescript-eslint/types": "7.18.0", "@typescript-eslint/visitor-keys": "7.18.0" } }, "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA=="],
|
||||
|
||||
"@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@7.18.0", "", { "dependencies": { "@typescript-eslint/typescript-estree": "7.18.0", "@typescript-eslint/utils": "7.18.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, "peerDependencies": { "eslint": "^8.56.0" } }, "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA=="],
|
||||
|
||||
"@typescript-eslint/types": ["@typescript-eslint/types@7.18.0", "", {}, "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ=="],
|
||||
|
||||
"@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@7.18.0", "", { "dependencies": { "@typescript-eslint/types": "7.18.0", "@typescript-eslint/visitor-keys": "7.18.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", "minimatch": "^9.0.4", "semver": "^7.6.0", "ts-api-utils": "^1.3.0" } }, "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA=="],
|
||||
|
||||
"@typescript-eslint/utils": ["@typescript-eslint/utils@7.18.0", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "@typescript-eslint/scope-manager": "7.18.0", "@typescript-eslint/types": "7.18.0", "@typescript-eslint/typescript-estree": "7.18.0" }, "peerDependencies": { "eslint": "^8.56.0" } }, "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw=="],
|
||||
|
||||
"@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@7.18.0", "", { "dependencies": { "@typescript-eslint/types": "7.18.0", "eslint-visitor-keys": "^3.4.3" } }, "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg=="],
|
||||
|
||||
"@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="],
|
||||
|
||||
"@unhead/schema": ["@unhead/schema@1.11.18", "", { "dependencies": { "hookable": "^5.5.3", "zhead": "^2.2.4" } }, "sha512-a3TA/OJCRdfbFhcA3Hq24k1ZU1o9szicESrw8DZcGyQFacHnh84mVgnyqSkMnwgCmfN4kvjSiTBlLEHS6+wATw=="],
|
||||
|
||||
"@xmldom/xmldom": ["@xmldom/xmldom@0.9.7", "", {}, "sha512-syvR8iIJjpTZ/stv7l89UAViwGFh6lbheeOaqSxkYx9YNmIVvPTRH+CT/fpykFtUx5N+8eSMDRvggF9J8GEPzQ=="],
|
||||
|
||||
"abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="],
|
||||
|
||||
"acorn": ["acorn@8.14.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA=="],
|
||||
|
||||
"acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="],
|
||||
|
||||
"agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="],
|
||||
|
||||
"ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="],
|
||||
|
||||
"ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||
|
||||
"ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||
|
||||
"argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="],
|
||||
|
||||
"array-union": ["array-union@2.1.0", "", {}, "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="],
|
||||
|
||||
"asap": ["asap@2.0.6", "", {}, "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA=="],
|
||||
|
||||
"async": ["async@3.2.6", "", {}, "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA=="],
|
||||
|
||||
"asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="],
|
||||
|
||||
"balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="],
|
||||
|
||||
"brace-expansion": ["brace-expansion@1.1.11", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA=="],
|
||||
|
||||
"braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="],
|
||||
|
||||
"buffer-equal-constant-time": ["buffer-equal-constant-time@1.0.1", "", {}, "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA=="],
|
||||
|
||||
"bun-types": ["bun-types@1.2.2", "", { "dependencies": { "@types/node": "*", "@types/ws": "~8.5.10" } }, "sha512-RCbMH5elr9gjgDGDhkTTugA21XtJAy/9jkKe/G3WR2q17VPGhcquf9Sir6uay9iW+7P/BV0CAHA1XlHXMAVKHg=="],
|
||||
|
||||
"call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.1", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g=="],
|
||||
|
||||
"call-bound": ["call-bound@1.0.3", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "get-intrinsic": "^1.2.6" } }, "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA=="],
|
||||
|
||||
"callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="],
|
||||
|
||||
"chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
|
||||
|
||||
"color": ["color@3.2.1", "", { "dependencies": { "color-convert": "^1.9.3", "color-string": "^1.6.0" } }, "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA=="],
|
||||
|
||||
"color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="],
|
||||
|
||||
"color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="],
|
||||
|
||||
"color-string": ["color-string@1.9.1", "", { "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" } }, "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg=="],
|
||||
|
||||
"colorspace": ["colorspace@1.1.4", "", { "dependencies": { "color": "^3.1.3", "text-hex": "1.0.x" } }, "sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w=="],
|
||||
|
||||
"combined-stream": ["combined-stream@1.0.8", "", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="],
|
||||
|
||||
"component-emitter": ["component-emitter@1.3.1", "", {}, "sha512-T0+barUSQRTUQASh8bx02dl+DhF54GtIDY13Y3m9oWTklKbb3Wv974meRpeZ3lp1JpLVECWWNHC4vaG2XHXouQ=="],
|
||||
|
||||
"concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="],
|
||||
|
||||
"cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="],
|
||||
|
||||
"cookiejar": ["cookiejar@2.1.4", "", {}, "sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw=="],
|
||||
|
||||
"cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="],
|
||||
|
||||
"data-uri-to-buffer": ["data-uri-to-buffer@4.0.1", "", {}, "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A=="],
|
||||
|
||||
"debug": ["debug@4.4.0", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA=="],
|
||||
|
||||
"deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="],
|
||||
|
||||
"deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="],
|
||||
|
||||
"delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="],
|
||||
|
||||
"dezalgo": ["dezalgo@1.0.4", "", { "dependencies": { "asap": "^2.0.0", "wrappy": "1" } }, "sha512-rXSP0bf+5n0Qonsb+SVVfNfIsimO4HEtmnIpPHY8Q1UCzKlQrDMfdobr8nJOOsRgWCyMRqeSBQzmWUMq7zvVig=="],
|
||||
|
||||
"dir-glob": ["dir-glob@3.0.1", "", { "dependencies": { "path-type": "^4.0.0" } }, "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA=="],
|
||||
|
||||
"doctrine": ["doctrine@3.0.0", "", { "dependencies": { "esutils": "^2.0.2" } }, "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w=="],
|
||||
|
||||
"dom-serializer": ["dom-serializer@2.0.0", "", { "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", "entities": "^4.2.0" } }, "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg=="],
|
||||
|
||||
"domelementtype": ["domelementtype@2.3.0", "", {}, "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw=="],
|
||||
|
||||
"domhandler": ["domhandler@5.0.3", "", { "dependencies": { "domelementtype": "^2.3.0" } }, "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w=="],
|
||||
|
||||
"domutils": ["domutils@3.2.2", "", { "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", "domhandler": "^5.0.3" } }, "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw=="],
|
||||
|
||||
"dotenv": ["dotenv@16.4.7", "", {}, "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ=="],
|
||||
|
||||
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
|
||||
|
||||
"ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="],
|
||||
|
||||
"elysia": ["elysia@1.2.12", "", { "dependencies": { "@sinclair/typebox": "^0.34.15", "cookie": "^1.0.2", "memoirist": "^0.3.0", "openapi-types": "^12.1.3" }, "peerDependencies": { "typescript": ">= 5.0.0" }, "optionalPeers": ["typescript"] }, "sha512-X1bZo09qe8/Poa/5tz08Y+sE/77B/wLwnA5xDDENU3FCrsUtYJuBVcy6BPXGRCgnJ1fPQpc0Ov2ZU5MYJXluTg=="],
|
||||
|
||||
"enabled": ["enabled@2.0.0", "", {}, "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ=="],
|
||||
|
||||
"entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="],
|
||||
|
||||
"es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="],
|
||||
|
||||
"es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="],
|
||||
|
||||
"es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="],
|
||||
|
||||
"escape-string-regexp": ["escape-string-regexp@4.0.0", "", {}, "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="],
|
||||
|
||||
"eslint": ["eslint@8.57.1", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.4", "@eslint/js": "8.57.1", "@humanwhocodes/config-array": "^0.13.0", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", "@ungap/structured-clone": "^1.2.0", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", "eslint-scope": "^7.2.2", "eslint-visitor-keys": "^3.4.3", "espree": "^9.6.1", "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "find-up": "^5.0.0", "glob-parent": "^6.0.2", "globals": "^13.19.0", "graphemer": "^1.4.0", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", "optionator": "^0.9.3", "strip-ansi": "^6.0.1", "text-table": "^0.2.0" }, "bin": { "eslint": "bin/eslint.js" } }, "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA=="],
|
||||
|
||||
"eslint-config-prettier": ["eslint-config-prettier@9.1.0", "", { "peerDependencies": { "eslint": ">=7.0.0" }, "bin": { "eslint-config-prettier": "bin/cli.js" } }, "sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw=="],
|
||||
|
||||
"eslint-plugin-prettier": ["eslint-plugin-prettier@5.2.3", "", { "dependencies": { "prettier-linter-helpers": "^1.0.0", "synckit": "^0.9.1" }, "peerDependencies": { "@types/eslint": ">=8.0.0", "eslint": ">=8.0.0", "eslint-config-prettier": "*", "prettier": ">=3.0.0" }, "optionalPeers": ["@types/eslint", "eslint-config-prettier"] }, "sha512-qJ+y0FfCp/mQYQ/vWQ3s7eUlFEL4PyKfAJxsnYTJ4YT73nsJBWqmEpFryxV9OeUiqmsTsYJ5Y+KDNaeP31wrRw=="],
|
||||
|
||||
"eslint-scope": ["eslint-scope@7.2.2", "", { "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" } }, "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg=="],
|
||||
|
||||
"eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="],
|
||||
|
||||
"espree": ["espree@9.6.1", "", { "dependencies": { "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^3.4.1" } }, "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ=="],
|
||||
|
||||
"esquery": ["esquery@1.6.0", "", { "dependencies": { "estraverse": "^5.1.0" } }, "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg=="],
|
||||
|
||||
"esrecurse": ["esrecurse@4.3.0", "", { "dependencies": { "estraverse": "^5.2.0" } }, "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag=="],
|
||||
|
||||
"estraverse": ["estraverse@5.3.0", "", {}, "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="],
|
||||
|
||||
"esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="],
|
||||
|
||||
"event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="],
|
||||
|
||||
"fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
|
||||
|
||||
"fast-diff": ["fast-diff@1.3.0", "", {}, "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw=="],
|
||||
|
||||
"fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="],
|
||||
|
||||
"fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="],
|
||||
|
||||
"fast-levenshtein": ["fast-levenshtein@2.0.6", "", {}, "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="],
|
||||
|
||||
"fast-safe-stringify": ["fast-safe-stringify@2.1.1", "", {}, "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA=="],
|
||||
|
||||
"fastq": ["fastq@1.19.0", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA=="],
|
||||
|
||||
"fecha": ["fecha@4.2.3", "", {}, "sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw=="],
|
||||
|
||||
"fetch-blob": ["fetch-blob@3.2.0", "", { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ=="],
|
||||
|
||||
"file-entry-cache": ["file-entry-cache@6.0.1", "", { "dependencies": { "flat-cache": "^3.0.4" } }, "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg=="],
|
||||
|
||||
"file-stream-rotator": ["file-stream-rotator@0.6.1", "", { "dependencies": { "moment": "^2.29.1" } }, "sha512-u+dBid4PvZw17PmDeRcNOtCP9CCK/9lRN2w+r1xIS7yOL9JFrIBKTvrYsxT4P0pGtThYTn++QS5ChHaUov3+zQ=="],
|
||||
|
||||
"fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="],
|
||||
|
||||
"find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="],
|
||||
|
||||
"flat-cache": ["flat-cache@3.2.0", "", { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.3", "rimraf": "^3.0.2" } }, "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw=="],
|
||||
|
||||
"flatted": ["flatted@3.3.2", "", {}, "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA=="],
|
||||
|
||||
"fn.name": ["fn.name@1.1.0", "", {}, "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw=="],
|
||||
|
||||
"form-data": ["form-data@4.0.1", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "mime-types": "^2.1.12" } }, "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw=="],
|
||||
|
||||
"form-data-encoder": ["form-data-encoder@1.7.2", "", {}, "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="],
|
||||
|
||||
"formdata-node": ["formdata-node@4.4.1", "", { "dependencies": { "node-domexception": "1.0.0", "web-streams-polyfill": "4.0.0-beta.3" } }, "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ=="],
|
||||
|
||||
"formdata-polyfill": ["formdata-polyfill@4.0.10", "", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="],
|
||||
|
||||
"formidable": ["formidable@2.1.2", "", { "dependencies": { "dezalgo": "^1.0.4", "hexoid": "^1.0.0", "once": "^1.4.0", "qs": "^6.11.0" } }, "sha512-CM3GuJ57US06mlpQ47YcunuUZ9jpm8Vx+P2CGt2j7HpgkKZO/DJYQ0Bobim8G6PFQmK5lOqOOdUXboU+h73A4g=="],
|
||||
|
||||
"fs.realpath": ["fs.realpath@1.0.0", "", {}, "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="],
|
||||
|
||||
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
|
||||
|
||||
"get-intrinsic": ["get-intrinsic@1.2.7", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "function-bind": "^1.1.2", "get-proto": "^1.0.0", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA=="],
|
||||
|
||||
"get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="],
|
||||
|
||||
"glob": ["glob@7.2.3", "", { "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" } }, "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q=="],
|
||||
|
||||
"glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="],
|
||||
|
||||
"globals": ["globals@13.24.0", "", { "dependencies": { "type-fest": "^0.20.2" } }, "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ=="],
|
||||
|
||||
"globby": ["globby@11.1.0", "", { "dependencies": { "array-union": "^2.1.0", "dir-glob": "^3.0.1", "fast-glob": "^3.2.9", "ignore": "^5.2.0", "merge2": "^1.4.1", "slash": "^3.0.0" } }, "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g=="],
|
||||
|
||||
"gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="],
|
||||
|
||||
"graphemer": ["graphemer@1.4.0", "", {}, "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="],
|
||||
|
||||
"has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="],
|
||||
|
||||
"has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="],
|
||||
|
||||
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
|
||||
|
||||
"helmet": ["helmet@7.2.0", "", {}, "sha512-ZRiwvN089JfMXokizgqEPXsl2Guk094yExfoDXR0cBYWxtBbaSww/w+vT4WEJsBW2iTUi1GgZ6swmoug3Oy4Xw=="],
|
||||
|
||||
"hexoid": ["hexoid@1.0.0", "", {}, "sha512-QFLV0taWQOZtvIRIAdBChesmogZrtuXvVWsFHZTk2SU+anspqZ2vMnoLg7IE1+Uk16N19APic1BuF8bC8c2m5g=="],
|
||||
|
||||
"hookable": ["hookable@5.5.3", "", {}, "sha512-Yc+BQe8SvoXH1643Qez1zqLRmbA5rCL+sSmk6TVos0LWVfNIB7PGncdlId77WzLGSIB5KaWgTaNTs2lNVEI6VQ=="],
|
||||
|
||||
"htmlparser2": ["htmlparser2@8.0.2", "", { "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.3", "domutils": "^3.0.1", "entities": "^4.4.0" } }, "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA=="],
|
||||
|
||||
"humanize-ms": ["humanize-ms@1.2.1", "", { "dependencies": { "ms": "^2.0.0" } }, "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ=="],
|
||||
|
||||
"husky": ["husky@9.1.7", "", { "bin": { "husky": "bin.js" } }, "sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA=="],
|
||||
|
||||
"ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="],
|
||||
|
||||
"import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="],
|
||||
|
||||
"imurmurhash": ["imurmurhash@0.1.4", "", {}, "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="],
|
||||
|
||||
"inflight": ["inflight@1.0.6", "", { "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA=="],
|
||||
|
||||
"inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="],
|
||||
|
||||
"is-arrayish": ["is-arrayish@0.3.2", "", {}, "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="],
|
||||
|
||||
"is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="],
|
||||
|
||||
"is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="],
|
||||
|
||||
"is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="],
|
||||
|
||||
"is-path-inside": ["is-path-inside@3.0.3", "", {}, "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ=="],
|
||||
|
||||
"is-plain-object": ["is-plain-object@5.0.0", "", {}, "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q=="],
|
||||
|
||||
"is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="],
|
||||
|
||||
"isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="],
|
||||
|
||||
"js-yaml": ["js-yaml@4.1.0", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA=="],
|
||||
|
||||
"json-buffer": ["json-buffer@3.0.1", "", {}, "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="],
|
||||
|
||||
"json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],
|
||||
|
||||
"json-stable-stringify-without-jsonify": ["json-stable-stringify-without-jsonify@1.0.1", "", {}, "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="],
|
||||
|
||||
"jsonwebtoken": ["jsonwebtoken@9.0.2", "", { "dependencies": { "jws": "^3.2.2", "lodash.includes": "^4.3.0", "lodash.isboolean": "^3.0.3", "lodash.isinteger": "^4.0.4", "lodash.isnumber": "^3.0.3", "lodash.isplainobject": "^4.0.6", "lodash.isstring": "^4.0.1", "lodash.once": "^4.0.0", "ms": "^2.1.1", "semver": "^7.5.4" } }, "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ=="],
|
||||
|
||||
"jwa": ["jwa@1.4.1", "", { "dependencies": { "buffer-equal-constant-time": "1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA=="],
|
||||
|
||||
"jws": ["jws@3.2.2", "", { "dependencies": { "jwa": "^1.4.1", "safe-buffer": "^5.0.1" } }, "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA=="],
|
||||
|
||||
"keyv": ["keyv@4.5.4", "", { "dependencies": { "json-buffer": "3.0.1" } }, "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw=="],
|
||||
|
||||
"kuler": ["kuler@2.0.0", "", {}, "sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A=="],
|
||||
|
||||
"levn": ["levn@0.4.1", "", { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ=="],
|
||||
|
||||
"locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="],
|
||||
|
||||
"lodash.includes": ["lodash.includes@4.3.0", "", {}, "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w=="],
|
||||
|
||||
"lodash.isboolean": ["lodash.isboolean@3.0.3", "", {}, "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg=="],
|
||||
|
||||
"lodash.isinteger": ["lodash.isinteger@4.0.4", "", {}, "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA=="],
|
||||
|
||||
"lodash.isnumber": ["lodash.isnumber@3.0.3", "", {}, "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw=="],
|
||||
|
||||
"lodash.isplainobject": ["lodash.isplainobject@4.0.6", "", {}, "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="],
|
||||
|
||||
"lodash.isstring": ["lodash.isstring@4.0.1", "", {}, "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw=="],
|
||||
|
||||
"lodash.merge": ["lodash.merge@4.6.2", "", {}, "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="],
|
||||
|
||||
"lodash.once": ["lodash.once@4.1.1", "", {}, "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg=="],
|
||||
|
||||
"logform": ["logform@2.7.0", "", { "dependencies": { "@colors/colors": "1.6.0", "@types/triple-beam": "^1.3.2", "fecha": "^4.2.0", "ms": "^2.1.1", "safe-stable-stringify": "^2.3.1", "triple-beam": "^1.3.0" } }, "sha512-TFYA4jnP7PVbmlBIfhlSe+WKxs9dklXMTEGcBCIvLhE/Tn3H6Gk1norupVW7m5Cnd4bLcr08AytbyV/xj7f/kQ=="],
|
||||
|
||||
"math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="],
|
||||
|
||||
"memoirist": ["memoirist@0.3.0", "", {}, "sha512-wR+4chMgVPq+T6OOsk40u9Wlpw1Pjx66NMNiYxCQQ4EUJ7jDs3D9kTCeKdBOkvAiqXlHLVJlvYL01PvIJ1MPNg=="],
|
||||
|
||||
"merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="],
|
||||
|
||||
"methods": ["methods@1.1.2", "", {}, "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w=="],
|
||||
|
||||
"micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="],
|
||||
|
||||
"mime": ["mime@2.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg=="],
|
||||
|
||||
"mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
|
||||
|
||||
"mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
|
||||
|
||||
"minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="],
|
||||
|
||||
"moment": ["moment@2.30.1", "", {}, "sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how=="],
|
||||
|
||||
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
|
||||
|
||||
"nanoid": ["nanoid@3.3.8", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w=="],
|
||||
|
||||
"natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="],
|
||||
|
||||
"node-domexception": ["node-domexception@1.0.0", "", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="],
|
||||
|
||||
"node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="],
|
||||
|
||||
"object-hash": ["object-hash@3.0.0", "", {}, "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw=="],
|
||||
|
||||
"object-inspect": ["object-inspect@1.13.3", "", {}, "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA=="],
|
||||
|
||||
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
|
||||
|
||||
"one-time": ["one-time@1.0.0", "", { "dependencies": { "fn.name": "1.x.x" } }, "sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g=="],
|
||||
|
||||
"openai": ["openai@4.82.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" }, "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-1bTxOVGZuVGsKKUWbh3BEwX1QxIXUftJv+9COhhGGVDTFwiaOd4gWsMynF2ewj1mg6by3/O+U8+EEHpWRdPaJg=="],
|
||||
|
||||
"openapi-types": ["openapi-types@12.1.3", "", {}, "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw=="],
|
||||
|
||||
"optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="],
|
||||
|
||||
"p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="],
|
||||
|
||||
"p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="],
|
||||
|
||||
"parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="],
|
||||
|
||||
"parse-srcset": ["parse-srcset@1.0.2", "", {}, "sha512-/2qh0lav6CmI15FzA3i/2Bzk2zCgQhGMkvhOhKNcBVQ1ldgpbfiNTVslmooUmWJcADi1f1kIeynbDRVzNlfR6Q=="],
|
||||
|
||||
"path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="],
|
||||
|
||||
"path-is-absolute": ["path-is-absolute@1.0.1", "", {}, "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg=="],
|
||||
|
||||
"path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
|
||||
|
||||
"path-type": ["path-type@4.0.0", "", {}, "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="],
|
||||
|
||||
"pathe": ["pathe@1.1.2", "", {}, "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ=="],
|
||||
|
||||
"picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
|
||||
|
||||
"picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||
|
||||
"postcss": ["postcss@8.5.1", "", { "dependencies": { "nanoid": "^3.3.8", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ=="],
|
||||
|
||||
"prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="],
|
||||
|
||||
"prettier": ["prettier@3.4.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ=="],
|
||||
|
||||
"prettier-linter-helpers": ["prettier-linter-helpers@1.0.0", "", { "dependencies": { "fast-diff": "^1.1.2" } }, "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w=="],
|
||||
|
||||
"punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="],
|
||||
|
||||
"qs": ["qs@6.14.0", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w=="],
|
||||
|
||||
"queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="],
|
||||
|
||||
"readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="],
|
||||
|
||||
"resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="],
|
||||
|
||||
"reusify": ["reusify@1.0.4", "", {}, "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="],
|
||||
|
||||
"rimraf": ["rimraf@3.0.2", "", { "dependencies": { "glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" } }, "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA=="],
|
||||
|
||||
"run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="],
|
||||
|
||||
"safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
|
||||
|
||||
"safe-stable-stringify": ["safe-stable-stringify@2.5.0", "", {}, "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA=="],
|
||||
|
||||
"sanitize-html": ["sanitize-html@2.14.0", "", { "dependencies": { "deepmerge": "^4.2.2", "escape-string-regexp": "^4.0.0", "htmlparser2": "^8.0.0", "is-plain-object": "^5.0.0", "parse-srcset": "^1.0.2", "postcss": "^8.3.11" } }, "sha512-CafX+IUPxZshXqqRaG9ZClSlfPVjSxI0td7n07hk8QO2oO+9JDnlcL8iM8TWeOXOIBFgIOx6zioTzM53AOMn3g=="],
|
||||
|
||||
"semver": ["semver@7.7.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA=="],
|
||||
|
||||
"shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],
|
||||
|
||||
"shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
|
||||
|
||||
"side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="],
|
||||
|
||||
"side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="],
|
||||
|
||||
"side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="],
|
||||
|
||||
"side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="],
|
||||
|
||||
"simple-swizzle": ["simple-swizzle@0.2.2", "", { "dependencies": { "is-arrayish": "^0.3.1" } }, "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg=="],
|
||||
|
||||
"slash": ["slash@3.0.0", "", {}, "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="],
|
||||
|
||||
"source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],
|
||||
|
||||
"stack-trace": ["stack-trace@0.0.10", "", {}, "sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg=="],
|
||||
|
||||
"string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="],
|
||||
|
||||
"strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||
|
||||
"strip-json-comments": ["strip-json-comments@3.1.1", "", {}, "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="],
|
||||
|
||||
"superagent": ["superagent@8.1.2", "", { "dependencies": { "component-emitter": "^1.3.0", "cookiejar": "^2.1.4", "debug": "^4.3.4", "fast-safe-stringify": "^2.1.1", "form-data": "^4.0.0", "formidable": "^2.1.2", "methods": "^1.1.2", "mime": "2.6.0", "qs": "^6.11.0", "semver": "^7.3.8" } }, "sha512-6WTxW1EB6yCxV5VFOIPQruWGHqc3yI7hEmZK6h+pyk69Lk/Ut7rLUY6W/ONF2MjBuGjvmMiIpsrVJ2vjrHlslA=="],
|
||||
|
||||
"supertest": ["supertest@6.3.4", "", { "dependencies": { "methods": "^1.1.2", "superagent": "^8.1.2" } }, "sha512-erY3HFDG0dPnhw4U+udPfrzXa4xhSG+n4rxfRuZWCUvjFWwKl+OxWf/7zk50s84/fAAs7vf5QAb9uRa0cCykxw=="],
|
||||
|
||||
"supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="],
|
||||
|
||||
"synckit": ["synckit@0.9.2", "", { "dependencies": { "@pkgr/core": "^0.1.0", "tslib": "^2.6.2" } }, "sha512-vrozgXDQwYO72vHjUb/HnFbQx1exDjoKzqx23aXEg2a9VIg2TSFZ8FmeZpTjUCFMYw7mpX4BE2SFu8wI7asYsw=="],
|
||||
|
||||
"text-hex": ["text-hex@1.0.0", "", {}, "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg=="],
|
||||
|
||||
"text-table": ["text-table@0.2.0", "", {}, "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="],
|
||||
|
||||
"to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="],
|
||||
|
||||
"tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
|
||||
|
||||
"triple-beam": ["triple-beam@1.4.1", "", {}, "sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg=="],
|
||||
|
||||
"ts-api-utils": ["ts-api-utils@1.4.3", "", { "peerDependencies": { "typescript": ">=4.2.0" } }, "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw=="],
|
||||
|
||||
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||
|
||||
"type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="],
|
||||
|
||||
"type-fest": ["type-fest@0.20.2", "", {}, "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ=="],
|
||||
|
||||
"typescript": ["typescript@5.7.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw=="],
|
||||
|
||||
"undici-types": ["undici-types@6.19.8", "", {}, "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw=="],
|
||||
|
||||
"uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="],
|
||||
|
||||
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
||||
|
||||
"uuid": ["uuid@11.0.5", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-508e6IcKLrhxKdBbcA2b4KQZlLVp2+J5UwQ6F7Drckkc5N9ZJwFa4TgWtsww9UG8fGHbm6gbV19TdM5pQ4GaIA=="],
|
||||
|
||||
"web-streams-polyfill": ["web-streams-polyfill@3.3.3", "", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="],
|
||||
|
||||
"webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
|
||||
|
||||
"whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
|
||||
|
||||
"which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],
|
||||
|
||||
"winston": ["winston@3.17.0", "", { "dependencies": { "@colors/colors": "^1.6.0", "@dabh/diagnostics": "^2.0.2", "async": "^3.2.3", "is-stream": "^2.0.0", "logform": "^2.7.0", "one-time": "^1.0.0", "readable-stream": "^3.4.0", "safe-stable-stringify": "^2.3.1", "stack-trace": "0.0.x", "triple-beam": "^1.3.0", "winston-transport": "^4.9.0" } }, "sha512-DLiFIXYC5fMPxaRg832S6F5mJYvePtmO5G9v9IgUFPhXm9/GkXarH/TUrBAVzhTCzAj9anE/+GjrgXp/54nOgw=="],
|
||||
|
||||
"winston-daily-rotate-file": ["winston-daily-rotate-file@5.0.0", "", { "dependencies": { "file-stream-rotator": "^0.6.1", "object-hash": "^3.0.0", "triple-beam": "^1.4.1", "winston-transport": "^4.7.0" }, "peerDependencies": { "winston": "^3" } }, "sha512-JDjiXXkM5qvwY06733vf09I2wnMXpZEhxEVOSPenZMii+g7pcDcTBt2MRugnoi8BwVSuCT2jfRXBUy+n1Zz/Yw=="],
|
||||
|
||||
"winston-transport": ["winston-transport@4.9.0", "", { "dependencies": { "logform": "^2.7.0", "readable-stream": "^3.6.2", "triple-beam": "^1.3.0" } }, "sha512-8drMJ4rkgaPo1Me4zD/3WLfI/zPdA9o2IipKODunnGDcuqbHwjsbB79ylv04LCGGzU0xQ6vTznOMpQGaLhhm6A=="],
|
||||
|
||||
"word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="],
|
||||
|
||||
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
|
||||
|
||||
"ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="],
|
||||
|
||||
"yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="],
|
||||
|
||||
"zhead": ["zhead@2.2.4", "", {}, "sha512-8F0OI5dpWIA5IGG5NHUg9staDwz/ZPxZtvGVf01j7vHqSyZ0raHY+78atOVxRqb73AotX22uV1pXt3gYSstGag=="],
|
||||
|
||||
"zod": ["zod@3.24.1", "", {}, "sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A=="],
|
||||
|
||||
"@scalar/themes/@scalar/types": ["@scalar/types@0.0.30", "", { "dependencies": { "@scalar/openapi-types": "0.1.7", "@unhead/schema": "^1.11.11" } }, "sha512-rhgwovQb5f7PXuUB5bLUElpo90fdsiwcOgBXVWZ6n6dnFSKovNJ7GPXQimsZioMzTF6TdwfP94UpZVdZAK4aTw=="],
|
||||
|
||||
"@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="],
|
||||
|
||||
"color/color-convert": ["color-convert@1.9.3", "", { "dependencies": { "color-name": "1.1.3" } }, "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg=="],
|
||||
|
||||
"color-string/color-name": ["color-name@1.1.3", "", {}, "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="],
|
||||
|
||||
"fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="],
|
||||
|
||||
"formdata-node/web-streams-polyfill": ["web-streams-polyfill@4.0.0-beta.3", "", {}, "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug=="],
|
||||
|
||||
"openai/@types/node": ["@types/node@18.19.75", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw=="],
"openai/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
"@scalar/themes/@scalar/types/@scalar/openapi-types": ["@scalar/openapi-types@0.1.7", "", {}, "sha512-oOTG3JQifg55U3DhKB7WdNIxFnJzbPJe7rqdyWdio977l8IkxQTVmObftJhdNIMvhV2K+1f/bDoMQGu6yTaD0A=="],
"@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.1", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA=="],
"color/color-convert/color-name": ["color-name@1.1.3", "", {}, "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="],
"openai/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
}
}
|
||||
@@ -1,5 +1,5 @@
|
||||
[test]
preload = ["./src/__tests__/setup.ts"]
preload = ["./test/setup.ts"]
coverage = true
coverageThreshold = {
statements = 80,
@@ -7,7 +7,7 @@ coverageThreshold = {
functions = 80,
lines = 80
}
timeout = 30000
timeout = 10000
testMatch = ["**/__tests__/**/*.test.ts"]
testPathIgnorePatterns = ["/node_modules/", "/dist/"]
collectCoverageFrom = [
@@ -48,3 +48,6 @@ reload = true
[performance]
gc = true
optimize = true

[test.env]
NODE_ENV = "test"
|
||||
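For reference, a test file picked up by the `testMatch` pattern in the configuration above might look like the following minimal sketch. The `clampBrightness` helper is hypothetical and only stands in for whatever module from `src/` is under test; `describe`, `it`, and `expect` come from Bun's built-in `bun:test` runner.

```typescript
// src/__tests__/example.test.ts (matched by testMatch = ["**/__tests__/**/*.test.ts"])
import { describe, it, expect } from "bun:test";

// Hypothetical unit under test; replace with a real module from src/.
function clampBrightness(value: number): number {
    return Math.min(255, Math.max(0, value));
}

describe("clampBrightness", () => {
    it("keeps values inside the 0-255 range", () => {
        expect(clampBrightness(300)).toBe(255);
        expect(clampBrightness(-5)).toBe(0);
        expect(clampBrightness(128)).toBe(128);
    });
});
```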
118
docker-build.sh
@@ -3,16 +3,52 @@
|
||||
# Enable error handling
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Function to print colored messages
|
||||
print_message() {
|
||||
local color=$1
|
||||
local message=$2
|
||||
echo -e "${color}${message}${NC}"
|
||||
}
|
||||
|
||||
# Function to clean up on script exit
|
||||
cleanup() {
|
||||
echo "Cleaning up..."
|
||||
print_message "$YELLOW" "Cleaning up..."
|
||||
docker builder prune -f --filter until=24h
|
||||
docker image prune -f
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Parse command line arguments
|
||||
ENABLE_SPEECH=false
|
||||
ENABLE_GPU=false
|
||||
BUILD_TYPE="standard"
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--speech)
|
||||
ENABLE_SPEECH=true
|
||||
BUILD_TYPE="speech"
|
||||
shift
|
||||
;;
|
||||
--gpu)
|
||||
ENABLE_GPU=true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
print_message "$RED" "Unknown option: $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Clean up Docker system
|
||||
echo "Cleaning up Docker system..."
|
||||
print_message "$YELLOW" "Cleaning up Docker system..."
|
||||
docker system prune -f --volumes
|
||||
|
||||
# Set build arguments for better performance
|
||||
@@ -26,23 +62,47 @@ BUILD_MEM=$(( TOTAL_MEM / 2 )) # Use half of available memory
|
||||
CPU_COUNT=$(nproc)
|
||||
CPU_QUOTA=$(( CPU_COUNT * 50000 )) # Allow 50% CPU usage per core
|
||||
|
||||
echo "Building with ${BUILD_MEM}MB memory limit and CPU quota ${CPU_QUOTA}"
|
||||
print_message "$YELLOW" "Building with ${BUILD_MEM}MB memory limit and CPU quota ${CPU_QUOTA}"
|
||||
|
||||
# Remove any existing lockfile
|
||||
rm -f bun.lockb
|
||||
|
||||
# Build with resource limits, optimizations, and timeout
|
||||
echo "Building Docker image..."
|
||||
# Base build arguments
|
||||
BUILD_ARGS=(
|
||||
--memory="${BUILD_MEM}m"
|
||||
--memory-swap="${BUILD_MEM}m"
|
||||
--cpu-quota="${CPU_QUOTA}"
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1
|
||||
--build-arg DOCKER_BUILDKIT=1
|
||||
--build-arg NODE_ENV=production
|
||||
--progress=plain
|
||||
--no-cache
|
||||
--compress
|
||||
)
|
||||
|
||||
# Add speech-specific build arguments if enabled
|
||||
if [ "$ENABLE_SPEECH" = true ]; then
|
||||
BUILD_ARGS+=(
|
||||
--build-arg ENABLE_SPEECH_FEATURES=true
|
||||
--build-arg ENABLE_WAKE_WORD=true
|
||||
--build-arg ENABLE_SPEECH_TO_TEXT=true
|
||||
)
|
||||
|
||||
# Add GPU support if requested
|
||||
if [ "$ENABLE_GPU" = true ]; then
|
||||
BUILD_ARGS+=(
|
||||
--build-arg CUDA_VISIBLE_DEVICES=0
|
||||
--build-arg COMPUTE_TYPE=float16
|
||||
)
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build the images
|
||||
print_message "$YELLOW" "Building Docker image (${BUILD_TYPE} build)..."
|
||||
|
||||
# Build main image
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
--memory="${BUILD_MEM}m" \
|
||||
--memory-swap="${BUILD_MEM}m" \
|
||||
--cpu-quota="${CPU_QUOTA}" \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
--build-arg DOCKER_BUILDKIT=1 \
|
||||
--build-arg NODE_ENV=production \
|
||||
--progress=plain \
|
||||
--no-cache \
|
||||
--compress \
|
||||
"${BUILD_ARGS[@]}" \
|
||||
-t homeassistant-mcp:latest \
|
||||
-t homeassistant-mcp:$(date +%Y%m%d) \
|
||||
.
|
||||
@@ -50,15 +110,39 @@ DOCKER_BUILDKIT=1 docker build \
|
||||
# Check if build was successful
|
||||
BUILD_EXIT_CODE=$?
|
||||
if [ $BUILD_EXIT_CODE -eq 124 ]; then
|
||||
echo "Build timed out after 15 minutes!"
|
||||
print_message "$RED" "Build timed out after 15 minutes!"
|
||||
exit 1
|
||||
elif [ $BUILD_EXIT_CODE -ne 0 ]; then
|
||||
echo "Build failed with exit code ${BUILD_EXIT_CODE}!"
|
||||
print_message "$RED" "Build failed with exit code ${BUILD_EXIT_CODE}!"
|
||||
exit 1
|
||||
else
|
||||
echo "Build completed successfully!"
|
||||
print_message "$GREEN" "Main image build completed successfully!"
|
||||
|
||||
# Show image size and layers
|
||||
docker image ls homeassistant-mcp:latest --format "Image size: {{.Size}}"
|
||||
echo "Layer count: $(docker history homeassistant-mcp:latest | wc -l)"
|
||||
fi
|
||||
|
||||
# Build speech-related images if enabled
|
||||
if [ "$ENABLE_SPEECH" = true ]; then
|
||||
print_message "$YELLOW" "Building speech-related images..."
|
||||
|
||||
# Build fast-whisper image
|
||||
print_message "$YELLOW" "Building fast-whisper image..."
|
||||
docker pull onerahmet/openai-whisper-asr-webservice:latest
|
||||
|
||||
# Build wake-word image
|
||||
print_message "$YELLOW" "Building wake-word image..."
|
||||
docker pull rhasspy/wyoming-openwakeword:latest
|
||||
|
||||
print_message "$GREEN" "Speech-related images built successfully!"
|
||||
fi
|
||||
|
||||
print_message "$GREEN" "All builds completed successfully!"
|
||||
|
||||
# Show final status
|
||||
print_message "$YELLOW" "Build Summary:"
|
||||
echo "Build Type: $BUILD_TYPE"
|
||||
echo "Speech Features: $([ "$ENABLE_SPEECH" = true ] && echo 'Enabled' || echo 'Disabled')"
|
||||
echo "GPU Support: $([ "$ENABLE_GPU" = true ] && echo 'Enabled' || echo 'Disabled')"
|
||||
docker image ls | grep -E 'homeassistant-mcp|whisper|openwakeword'
|
||||
73
docker-compose.speech.yml
Normal file
@@ -0,0 +1,73 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
homeassistant-mcp:
|
||||
image: homeassistant-mcp:latest
|
||||
environment:
|
||||
# Speech Feature Flags
|
||||
- ENABLE_SPEECH_FEATURES=${ENABLE_SPEECH_FEATURES:-true}
|
||||
- ENABLE_WAKE_WORD=${ENABLE_WAKE_WORD:-true}
|
||||
- ENABLE_SPEECH_TO_TEXT=${ENABLE_SPEECH_TO_TEXT:-true}
|
||||
|
||||
# Audio Configuration
|
||||
- NOISE_THRESHOLD=${NOISE_THRESHOLD:-0.05}
|
||||
- MIN_SPEECH_DURATION=${MIN_SPEECH_DURATION:-1.0}
|
||||
- SILENCE_DURATION=${SILENCE_DURATION:-0.5}
|
||||
- SAMPLE_RATE=${SAMPLE_RATE:-16000}
|
||||
- CHANNELS=${CHANNELS:-1}
|
||||
- CHUNK_SIZE=${CHUNK_SIZE:-1024}
|
||||
- PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
|
||||
|
||||
fast-whisper:
|
||||
image: onerahmet/openai-whisper-asr-webservice:latest
|
||||
volumes:
|
||||
- whisper-models:/models
|
||||
- audio-data:/audio
|
||||
environment:
|
||||
- ASR_MODEL=${WHISPER_MODEL_TYPE:-base}
|
||||
- ASR_ENGINE=faster_whisper
|
||||
- WHISPER_BEAM_SIZE=5
|
||||
- COMPUTE_TYPE=float32
|
||||
- LANGUAGE=en
|
||||
ports:
|
||||
- "9000:9000"
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '4.0'
|
||||
memory: 2G
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:9000/health" ]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
wake-word:
|
||||
image: rhasspy/wyoming-openwakeword:latest
|
||||
restart: unless-stopped
|
||||
devices:
|
||||
- /dev/snd:/dev/snd
|
||||
volumes:
|
||||
- /run/user/1000/pulse/native:/run/user/1000/pulse/native
|
||||
environment:
|
||||
- PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
|
||||
- PULSE_COOKIE=/run/user/1000/pulse/cookie
|
||||
- PYTHONUNBUFFERED=1
|
||||
- OPENWAKEWORD_MODEL=hey_jarvis
|
||||
- OPENWAKEWORD_THRESHOLD=0.5
|
||||
- MICROPHONE_COMMAND=arecord -D hw:0,0 -f S16_LE -c 1 -r 16000 -t raw
|
||||
group_add:
|
||||
- "${AUDIO_GID:-29}"
|
||||
network_mode: host
|
||||
privileged: true
|
||||
entrypoint: >
|
||||
/bin/bash -c " apt-get update && apt-get install -y pulseaudio alsa-utils && rm -rf /var/lib/apt/lists/* && /run.sh"
|
||||
healthcheck:
|
||||
test: [ "CMD-SHELL", "pactl info > /dev/null 2>&1 || exit 1" ]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
volumes:
|
||||
whisper-models:
|
||||
audio-data:
|
||||
@@ -1,88 +0,0 @@
|
||||
# Use Python slim image as builder
|
||||
FROM python:3.10-slim AS builder
|
||||
|
||||
# Install build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
curl \
|
||||
wget
|
||||
|
||||
# Create and activate virtual environment
|
||||
RUN python -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
|
||||
# Install Python dependencies with specific versions and CPU-only variants
|
||||
RUN pip install --no-cache-dir \
|
||||
"numpy>=1.24.3,<2.0" \
|
||||
"sounddevice" \
|
||||
"openwakeword" \
|
||||
"faster-whisper" \
|
||||
"transformers" \
|
||||
"torch" \
|
||||
"torchaudio" \
|
||||
"huggingface_hub" \
|
||||
"requests" \
|
||||
"soundfile" \
|
||||
"tflite-runtime"
|
||||
|
||||
# Create final image
|
||||
FROM python:3.10-slim
|
||||
|
||||
# Copy virtual environment from builder
|
||||
COPY --from=builder /opt/venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
|
||||
# Install audio dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
portaudio19-dev \
|
||||
pulseaudio \
|
||||
alsa-utils \
|
||||
curl \
|
||||
wget
|
||||
|
||||
# Create necessary directories with explicit permissions
|
||||
RUN mkdir -p /models/wake_word /audio /app /models/cache /models/models--Systran--faster-whisper-base /opt/venv/lib/python3.10/site-packages/openwakeword/resources/models \
|
||||
&& chmod -R 777 /models /audio /app /models/cache /models/models--Systran--faster-whisper-base /opt/venv/lib/python3.10/site-packages/openwakeword/resources/models
|
||||
|
||||
# Download wake word models
|
||||
RUN wget -O /opt/venv/lib/python3.10/site-packages/openwakeword/resources/models/alexa_v0.1.tflite \
|
||||
https://github.com/dscripka/openWakeWord/raw/main/openwakeword/resources/models/alexa_v0.1.tflite \
|
||||
&& wget -O /opt/venv/lib/python3.10/site-packages/openwakeword/resources/models/hey_jarvis_v0.1.tflite \
|
||||
https://github.com/dscripka/openWakeWord/raw/main/openwakeword/resources/models/hey_jarvis_v0.1.tflite \
|
||||
&& chmod 644 /opt/venv/lib/python3.10/site-packages/openwakeword/resources/models/*.tflite
|
||||
|
||||
# Set environment variables for model caching
|
||||
ENV HF_HOME=/models/cache
|
||||
ENV TRANSFORMERS_CACHE=/models/cache
|
||||
ENV HUGGINGFACE_HUB_CACHE=/models/cache
|
||||
|
||||
# Copy scripts and set permissions explicitly
|
||||
COPY wake_word_detector.py /app/wake_word_detector.py
|
||||
COPY setup-audio.sh /setup-audio.sh
|
||||
|
||||
# Ensure scripts are executable by any user
|
||||
RUN chmod 755 /setup-audio.sh /app/wake_word_detector.py
|
||||
|
||||
# Create a non-root user with explicit UID and GID
|
||||
RUN addgroup --gid 1000 user && \
|
||||
adduser --uid 1000 --gid 1000 --disabled-password --gecos '' user
|
||||
|
||||
# Change ownership of directories
|
||||
RUN chown -R 1000:1000 /models /audio /app /models/cache /models/models--Systran--faster-whisper-base \
|
||||
/opt/venv/lib/python3.10/site-packages/openwakeword/resources/models
|
||||
|
||||
# Switch to non-root user
|
||||
USER user
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Set environment variables
|
||||
ENV WHISPER_MODEL_PATH=/models \
|
||||
WAKEWORD_MODEL_PATH=/models/wake_word \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PULSE_SERVER=unix:/run/user/1000/pulse/native \
|
||||
HOME=/home/user
|
||||
|
||||
# Start the application
|
||||
CMD ["/setup-audio.sh"]
|
||||
35
docker/speech/asound.conf
Normal file
@@ -0,0 +1,35 @@
|
||||
pcm.!default {
    type pulse
    fallback "sysdefault"
    hint {
        show on
        description "Default ALSA Output (currently PulseAudio Sound Server)"
    }
}

ctl.!default {
    type pulse
    fallback "sysdefault"
}

# Use PulseAudio by default
pcm.pulse {
    type pulse
}

ctl.pulse {
    type pulse
}

# Explicit device for recording
pcm.microphone {
    type hw
    card 0
    device 0
}

# Default capture device
pcm.!default {
    type pulse
    hint.description "Default Audio Device"
}
|
||||
@@ -30,6 +30,9 @@ MAX_MODEL_LOAD_RETRIES = 3
|
||||
MODEL_LOAD_RETRY_DELAY = 5 # seconds
|
||||
MODEL_DOWNLOAD_TIMEOUT = 600 # 10 minutes timeout for model download
|
||||
|
||||
# ALSA device configuration
|
||||
AUDIO_DEVICE = 'hw:0,0' # Use ALSA hardware device directly
|
||||
|
||||
# Audio processing parameters
|
||||
NOISE_THRESHOLD = 0.08 # Increased threshold for better noise filtering
|
||||
MIN_SPEECH_DURATION = 2.0 # Longer minimum duration to avoid fragments
|
||||
@@ -44,7 +47,7 @@ WAKE_WORD_ENABLED = os.environ.get('ENABLE_WAKE_WORD', 'false').lower() == 'true
|
||||
SPEECH_ENABLED = os.environ.get('ENABLE_SPEECH_FEATURES', 'true').lower() == 'true'
|
||||
|
||||
# Wake word models to use (only if wake word is enabled)
|
||||
WAKE_WORDS = ["alexa"] # Using 'alexa' as temporary replacement for 'gaja'
|
||||
WAKE_WORDS = ["hey_jarvis"] # Using hey_jarvis as it's more similar to "hey gaja"
|
||||
WAKE_WORD_ALIAS = "gaja" # What we print when wake word is detected
|
||||
|
||||
# Home Assistant Configuration
|
||||
@@ -235,7 +238,22 @@ class AudioProcessor:
|
||||
self.buffer = np.zeros(SAMPLE_RATE * BUFFER_DURATION)
|
||||
self.buffer_lock = threading.Lock()
|
||||
self.last_transcription_time = 0
|
||||
self.stream = None
|
||||
|
||||
try:
|
||||
logger.info(f"Opening audio device: {AUDIO_DEVICE}")
|
||||
self.stream = sd.InputStream(
|
||||
device=AUDIO_DEVICE,
|
||||
samplerate=SAMPLE_RATE,
|
||||
channels=CHANNELS,
|
||||
dtype=np.int16,
|
||||
blocksize=CHUNK_SIZE,
|
||||
callback=self._audio_callback
|
||||
)
|
||||
logger.info("Audio stream initialized successfully")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize audio stream: {e}")
|
||||
raise
|
||||
|
||||
self.speech_detected = False
|
||||
self.silence_frames = 0
|
||||
self.speech_frames = 0
|
||||
@@ -272,7 +290,7 @@ class AudioProcessor:
|
||||
return True
|
||||
return False
|
||||
|
||||
def audio_callback(self, indata, frames, time, status):
|
||||
def _audio_callback(self, indata, frames, time, status):
|
||||
"""Callback for audio input"""
|
||||
if status:
|
||||
logger.warning(f"Audio callback status: {status}")
|
||||
@@ -382,7 +400,7 @@ class AudioProcessor:
|
||||
channels=CHANNELS,
|
||||
samplerate=SAMPLE_RATE,
|
||||
blocksize=CHUNK_SIZE,
|
||||
callback=self.audio_callback
|
||||
callback=self._audio_callback
|
||||
):
|
||||
logger.info("Audio input stream started successfully")
|
||||
logger.info("Listening for audio input...")
|
||||
|
||||
23
docs/Gemfile
@@ -1,23 +0,0 @@
|
||||
source "https://rubygems.org"

gem "github-pages", group: :jekyll_plugins
gem "jekyll-theme-minimal"
gem "jekyll-relative-links"
gem "jekyll-seo-tag"
gem "jekyll-remote-theme"
gem "jekyll-github-metadata"
gem "faraday-retry"

# Windows and JRuby do not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
  gem "tzinfo", ">= 1"
  gem "tzinfo-data"
end

# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]

# Add webrick for Ruby 3.0+
gem "webrick", "~> 1.7"
|
||||
@@ -1,78 +0,0 @@
|
||||
title: Model Context Protocol (MCP)
|
||||
description: A bridge between Home Assistant and Language Learning Models
|
||||
theme: jekyll-theme-minimal
|
||||
markdown: kramdown
|
||||
|
||||
# Repository settings
|
||||
repository: jango-blockchained/advanced-homeassistant-mcp
|
||||
github: [metadata]
|
||||
|
||||
# Add base URL and URL settings
|
||||
baseurl: "/advanced-homeassistant-mcp" # the subpath of your site
|
||||
url: "https://jango-blockchained.github.io" # the base hostname & protocol
|
||||
|
||||
# Theme settings
|
||||
logo: /assets/img/logo.png # path to logo (create this if you want a logo)
|
||||
show_downloads: true # show download buttons for your repo
|
||||
|
||||
plugins:
|
||||
- jekyll-relative-links
|
||||
- jekyll-seo-tag
|
||||
- jekyll-remote-theme
|
||||
- jekyll-github-metadata
|
||||
|
||||
# Enable relative links
|
||||
relative_links:
|
||||
enabled: true
|
||||
collections: true
|
||||
|
||||
# Navigation structure
|
||||
header_pages:
|
||||
- index.md
|
||||
- getting-started.md
|
||||
- api.md
|
||||
- usage.md
|
||||
- tools/tools.md
|
||||
- development/development.md
|
||||
- troubleshooting.md
|
||||
- contributing.md
|
||||
- roadmap.md
|
||||
|
||||
# Collections
|
||||
collections:
|
||||
tools:
|
||||
output: true
|
||||
permalink: /:collection/:name
|
||||
development:
|
||||
output: true
|
||||
permalink: /:collection/:name
|
||||
|
||||
# Default layouts
|
||||
defaults:
|
||||
- scope:
|
||||
path: ""
|
||||
type: "pages"
|
||||
values:
|
||||
layout: "default"
|
||||
- scope:
|
||||
path: "tools"
|
||||
type: "tools"
|
||||
values:
|
||||
layout: "default"
|
||||
- scope:
|
||||
path: "development"
|
||||
type: "development"
|
||||
values:
|
||||
layout: "default"
|
||||
|
||||
# Exclude files from processing
|
||||
exclude:
|
||||
- Gemfile
|
||||
- Gemfile.lock
|
||||
- node_modules
|
||||
- vendor
|
||||
|
||||
# Sass settings
|
||||
sass:
|
||||
style: compressed
|
||||
sass_dir: _sass
|
||||
@@ -1,52 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="{{ site.lang | default: "en-US" }}">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
{% seo %}
|
||||
<link rel="stylesheet" href="{{ "/assets/css/style.css?v=" | append: site.github.build_revision | relative_url }}">
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="wrapper">
|
||||
<header>
|
||||
<h1><a href="{{ "/" | absolute_url }}">{{ site.title | default: site.github.repository_name }}</a></h1>
|
||||
|
||||
{% if site.logo %}
|
||||
<img src="{{site.logo | relative_url}}" alt="Logo" />
|
||||
{% endif %}
|
||||
|
||||
<p>{{ site.description | default: site.github.project_tagline }}</p>
|
||||
|
||||
<p class="view"><a href="{{ site.github.repository_url }}">View the Project on GitHub <small>{{
|
||||
site.github.repository_nwo }}</small></a></p>
|
||||
|
||||
<nav class="main-nav">
|
||||
<h3>Documentation</h3>
|
||||
<ul>
|
||||
<li><a href="{{ '/getting-started' | relative_url }}">Getting Started</a></li>
|
||||
<li><a href="{{ '/api' | relative_url }}">API Reference</a></li>
|
||||
<li><a href="{{ '/sse-api' | relative_url }}">SSE API</a></li>
|
||||
<li><a href="{{ '/architecture' | relative_url }}">Architecture</a></li>
|
||||
<li><a href="{{ '/contributing' | relative_url }}">Contributing</a></li>
|
||||
<li><a href="{{ '/troubleshooting' | relative_url }}">Troubleshooting</a></li>
|
||||
</ul>
|
||||
</nav>
|
||||
</header>
|
||||
<section>
|
||||
{{ content }}
|
||||
</section>
|
||||
<footer>
|
||||
{% if site.github.is_project_page %}
|
||||
<p>This project is maintained by <a href="{{ site.github.owner_url }}">{{ site.github.owner_name }}</a></p>
|
||||
{% endif %}
|
||||
<p><small>Hosted on GitHub Pages — Theme by <a
|
||||
href="https://github.com/orderedlist">orderedlist</a></small></p>
|
||||
</footer>
|
||||
</div>
|
||||
<script src="{{ "/assets/js/scale.fix.js" | relative_url }}"></script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
170
docs/api.md
@@ -1,170 +0,0 @@
|
||||
# Home Assistant MCP Server API Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a reference for the MCP Server API, which offers basic device control and state management for Home Assistant.
|
||||
|
||||
## Authentication
|
||||
|
||||
All API requests require a valid JWT token in the Authorization header:
|
||||
|
||||
```http
|
||||
Authorization: Bearer YOUR_TOKEN
|
||||
```
|
||||
|
||||
## Core Endpoints
|
||||
|
||||
### Device State Management
|
||||
|
||||
#### Get Device State
|
||||
```http
|
||||
GET /api/state/{entity_id}
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 128
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Update Device State
|
||||
```http
|
||||
POST /api/state
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 128
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Device Control
|
||||
|
||||
#### Execute Device Command
|
||||
```http
|
||||
POST /api/control
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"command": "turn_on",
|
||||
"parameters": {
|
||||
"brightness": 50
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Real-Time Updates
|
||||
|
||||
### WebSocket Connection
|
||||
Connect to real-time updates:
|
||||
|
||||
```javascript
|
||||
const ws = new WebSocket('ws://localhost:3000/events');
|
||||
ws.onmessage = (event) => {
|
||||
const deviceUpdate = JSON.parse(event.data);
|
||||
console.log('Device state changed:', deviceUpdate);
|
||||
};
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Responses
|
||||
|
||||
```json
|
||||
{
|
||||
"error": {
|
||||
"code": "INVALID_REQUEST",
|
||||
"message": "Invalid request parameters",
|
||||
"details": "Entity ID not found or invalid command"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
Basic rate limiting is implemented:
|
||||
- Maximum of 100 requests per minute
|
||||
- Excess requests will receive a 429 Too Many Requests response (a retry sketch follows this list)
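A minimal client-side retry sketch for this limit is shown below; the helper name, the attempt count, and the linear backoff are illustrative assumptions rather than part of the API, and the `Retry-After` header is only honored if the server happens to send it:

```typescript
// Hypothetical helper: retry on 429 responses with a simple backoff.
async function fetchWithRetry(url: string, init: RequestInit, maxRetries = 3): Promise<Response> {
    for (let attempt = 0; ; attempt++) {
        const response = await fetch(url, init);
        if (response.status !== 429 || attempt >= maxRetries) {
            return response;
        }
        // Honor Retry-After when present; otherwise back off a little longer each attempt.
        const waitSeconds = Number(response.headers.get("Retry-After")) || attempt + 1;
        await new Promise((resolve) => setTimeout(resolve, waitSeconds * 1000));
    }
}
```

After the retries are exhausted the last response is returned unchanged, so callers can surface the 429 like any other error.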
|
||||
|
||||
## Supported Operations
|
||||
|
||||
### Supported Commands
|
||||
- `turn_on`
|
||||
- `turn_off`
|
||||
- `toggle`
|
||||
- `set_brightness`
|
||||
- `set_color`
|
||||
|
||||
### Supported Entities
|
||||
- Lights
|
||||
- Switches
|
||||
- Climate controls
|
||||
- Media players
|
||||
|
||||
## Limitations
|
||||
|
||||
- Limited to basic device control
|
||||
- No advanced automation
|
||||
- Minimal error handling
|
||||
- Basic authentication
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Always include a valid JWT token
|
||||
2. Handle potential errors in your client code
|
||||
3. Use WebSocket for real-time updates when possible
|
||||
4. Validate entity IDs before sending commands
|
||||
|
||||
## Example Client Usage
|
||||
|
||||
```typescript
|
||||
async function controlDevice(entityId: string, command: string, params?: Record<string, unknown>) {
|
||||
try {
|
||||
const response = await fetch('/api/control', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${token}`
|
||||
},
|
||||
body: JSON.stringify({
|
||||
entity_id: entityId,
|
||||
command,
|
||||
parameters: params
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.error?.message ?? 'Request failed');
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
} catch (error) {
|
||||
console.error('Device control failed:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Usage example
|
||||
controlDevice('light.living_room', 'turn_on', { brightness: 50 })
|
||||
.then(result => console.log('Device controlled successfully'))
|
||||
.catch(error => console.error('Control failed', error));
|
||||
```
|
||||
|
||||
## Future Development
|
||||
|
||||
Planned improvements:
|
||||
- Enhanced error handling
|
||||
- More comprehensive device support
|
||||
- Improved authentication mechanisms
|
||||
|
||||
*API is subject to change. Always refer to the latest documentation.*
|
||||
326
docs/api/core.md
@@ -1,326 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Core Functions
|
||||
parent: API Reference
|
||||
nav_order: 3
|
||||
---
|
||||
|
||||
# Core Functions API 🔧
|
||||
|
||||
The Core Functions API provides the fundamental operations for interacting with Home Assistant devices and services through MCP Server.
|
||||
|
||||
## Device Control
|
||||
|
||||
### Get Device State
|
||||
|
||||
Retrieve the current state of devices.
|
||||
|
||||
```http
|
||||
GET /api/state
|
||||
GET /api/state/{entity_id}
|
||||
```
|
||||
|
||||
Parameters:
|
||||
- `entity_id` (optional): Specific device ID to query
|
||||
|
||||
```bash
|
||||
# Get all states
|
||||
curl http://localhost:3000/api/state
|
||||
|
||||
# Get specific device state
|
||||
curl http://localhost:3000/api/state/light.living_room
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255,
|
||||
"color_temp": 370,
|
||||
"friendly_name": "Living Room Light"
|
||||
},
|
||||
"last_changed": "2024-01-20T15:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Control Device
|
||||
|
||||
Execute device commands.
|
||||
|
||||
```http
|
||||
POST /api/device/control
|
||||
```
|
||||
|
||||
Request body:
|
||||
```json
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"action": "turn_on",
|
||||
"parameters": {
|
||||
"brightness": 200,
|
||||
"color_temp": 400
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Available actions:
|
||||
- `turn_on`
|
||||
- `turn_off`
|
||||
- `toggle`
|
||||
- `set_value`
|
||||
|
||||
Example with curl:
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/device/control \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
|
||||
-d '{
|
||||
"entity_id": "light.living_room",
|
||||
"action": "turn_on",
|
||||
"parameters": {
|
||||
"brightness": 200
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Natural Language Commands
|
||||
|
||||
### Execute Command
|
||||
|
||||
Process natural language commands.
|
||||
|
||||
```http
|
||||
POST /api/command
|
||||
```
|
||||
|
||||
Request body:
|
||||
```json
|
||||
{
|
||||
"command": "Turn on the living room lights and set them to 50% brightness"
|
||||
}
|
||||
```
|
||||
|
||||
Example usage:
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/command \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
|
||||
-d '{
|
||||
"command": "Turn on the living room lights and set them to 50% brightness"
|
||||
}'
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"actions": [
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"action": "turn_on",
|
||||
"parameters": {
|
||||
"brightness": 127
|
||||
},
|
||||
"status": "completed"
|
||||
}
|
||||
],
|
||||
"message": "Command executed successfully"
|
||||
}
|
||||
```
|
||||
|
||||
## Scene Management
|
||||
|
||||
### Create Scene
|
||||
|
||||
Define a new scene with multiple actions.
|
||||
|
||||
```http
|
||||
POST /api/scene
|
||||
```
|
||||
|
||||
Request body:
|
||||
```json
|
||||
{
|
||||
"name": "Movie Night",
|
||||
"description": "Perfect lighting for movie watching",
|
||||
"actions": [
|
||||
{
|
||||
"entity_id": "light.living_room",
|
||||
"action": "turn_on",
|
||||
"parameters": {
|
||||
"brightness": 50,
|
||||
"color_temp": 500
|
||||
}
|
||||
},
|
||||
{
|
||||
"entity_id": "cover.living_room",
|
||||
"action": "close"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Activate Scene
|
||||
|
||||
Trigger a predefined scene.
|
||||
|
||||
```http
|
||||
POST /api/scene/{scene_name}/activate
|
||||
```
|
||||
|
||||
Example:
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/scene/movie_night/activate \
|
||||
-H "Authorization: Bearer YOUR_JWT_TOKEN"
|
||||
```
|
||||
|
||||
## Groups
|
||||
|
||||
### Create Device Group
|
||||
|
||||
Create a group of devices for collective control.
|
||||
|
||||
```http
|
||||
POST /api/group
|
||||
```
|
||||
|
||||
Request body:
|
||||
```json
|
||||
{
|
||||
"name": "Living Room",
|
||||
"entities": [
|
||||
"light.living_room_main",
|
||||
"light.living_room_accent",
|
||||
"switch.living_room_fan"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Control Group
|
||||
|
||||
Control multiple devices in a group.
|
||||
|
||||
```http
|
||||
POST /api/group/{group_name}/control
|
||||
```
|
||||
|
||||
Request body:
|
||||
```json
|
||||
{
|
||||
"action": "turn_off"
|
||||
}
|
||||
```
|
||||
|
||||
## System Operations
|
||||
|
||||
### Health Check
|
||||
|
||||
Check server status and connectivity.
|
||||
|
||||
```http
|
||||
GET /health
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"status": "healthy",
|
||||
"version": "1.0.0",
|
||||
"uptime": 3600,
|
||||
"homeAssistant": {
|
||||
"connected": true,
|
||||
"version": "2024.1.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
Get current server configuration.
|
||||
|
||||
```http
|
||||
GET /api/config
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"server": {
|
||||
"port": 3000,
|
||||
"host": "0.0.0.0",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"homeAssistant": {
|
||||
"url": "http://homeassistant:8123",
|
||||
"connected": true
|
||||
},
|
||||
"features": {
|
||||
"nlp": true,
|
||||
"scenes": true,
|
||||
"groups": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
All endpoints follow standard HTTP status codes and return detailed error messages:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": true,
|
||||
"code": "INVALID_ENTITY",
|
||||
"message": "Device 'light.nonexistent' not found",
|
||||
"details": {
|
||||
"entity_id": "light.nonexistent",
|
||||
"available_entities": [
|
||||
"light.living_room",
|
||||
"light.kitchen"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Common error codes:
|
||||
- `INVALID_ENTITY`: Device not found
|
||||
- `INVALID_ACTION`: Unsupported action
|
||||
- `INVALID_PARAMETERS`: Invalid command parameters
|
||||
- `AUTHENTICATION_ERROR`: Invalid or missing token
|
||||
- `CONNECTION_ERROR`: Home Assistant connection issue
|
||||
|
||||
## TypeScript Interfaces
|
||||
|
||||
```typescript
|
||||
interface DeviceState {
|
||||
entity_id: string;
|
||||
state: string;
|
||||
attributes: Record<string, any>;
|
||||
last_changed: string;
|
||||
}
|
||||
|
||||
interface DeviceCommand {
|
||||
entity_id: string;
|
||||
action: 'turn_on' | 'turn_off' | 'toggle' | 'set_value';
|
||||
parameters?: Record<string, any>;
|
||||
}
|
||||
|
||||
interface Scene {
|
||||
name: string;
|
||||
description?: string;
|
||||
actions: DeviceCommand[];
|
||||
}
|
||||
|
||||
interface Group {
|
||||
name: string;
|
||||
entities: string[];
|
||||
}
|
||||
```
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [API Overview](index.md)
|
||||
- [SSE API](sse.md)
|
||||
- [Architecture](../architecture.md)
|
||||
- [Examples](https://github.com/jango-blockchained/advanced-homeassistant-mcp/tree/main/examples)
|
||||
@@ -1,242 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: API Overview
|
||||
parent: API Reference
|
||||
nav_order: 1
|
||||
has_children: false
|
||||
---
|
||||
|
||||
# API Documentation 📚
|
||||
|
||||
Welcome to the MCP Server API documentation. This guide covers all available endpoints, authentication methods, and integration patterns.
|
||||
|
||||
## API Overview
|
||||
|
||||
The MCP Server provides several API categories:
|
||||
|
||||
1. **Core API** - Basic device control and state management
|
||||
2. **SSE API** - Real-time event subscriptions
|
||||
3. **Scene API** - Scene management and automation
|
||||
4. **Voice API** - Natural language command processing
|
||||
|
||||
## Authentication
|
||||
|
||||
All API endpoints require authentication using JWT tokens:
|
||||
|
||||
```bash
|
||||
# Include the token in your requests
|
||||
curl -H "Authorization: Bearer YOUR_JWT_TOKEN" http://localhost:3000/api/state
|
||||
```
|
||||
|
||||
To obtain a token:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "your_username", "password": "your_password"}'
|
||||
```
|
||||
|
||||
## Core Endpoints
|
||||
|
||||
### Device State
|
||||
|
||||
```http
|
||||
GET /api/state
|
||||
```
|
||||
|
||||
Retrieve the current state of all devices:
|
||||
|
||||
```bash
|
||||
curl http://localhost:3000/api/state
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"devices": [
|
||||
{
|
||||
"id": "light.living_room",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255,
|
||||
"color_temp": 370
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Command Execution
|
||||
|
||||
```http
|
||||
POST /api/command
|
||||
```
|
||||
|
||||
Execute a natural language command:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/command \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"command": "Turn on the kitchen lights"}'
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"action": "turn_on",
|
||||
"device": "light.kitchen",
|
||||
"message": "Kitchen lights turned on"
|
||||
}
|
||||
```
|
||||
|
||||
## Real-Time Events
|
||||
|
||||
### Event Subscription
|
||||
|
||||
```http
|
||||
GET /subscribe_events
|
||||
```
|
||||
|
||||
Subscribe to device state changes:
|
||||
|
||||
```javascript
|
||||
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_TOKEN');
|
||||
|
||||
eventSource.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
console.log('State changed:', data);
|
||||
};
|
||||
```
|
||||
|
||||
### Filtered Subscriptions
|
||||
|
||||
Subscribe to specific device types:
|
||||
|
||||
```http
|
||||
GET /subscribe_events?domain=light
|
||||
GET /subscribe_events?entity_id=light.living_room
|
||||
```
|
||||
|
||||
## Scene Management
|
||||
|
||||
### Create Scene
|
||||
|
||||
```http
|
||||
POST /api/scene
|
||||
```
|
||||
|
||||
Create a new scene:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/scene \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Movie Night",
|
||||
"actions": [
|
||||
{"device": "light.living_room", "action": "dim", "value": 20},
|
||||
{"device": "media_player.tv", "action": "on"}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
### Activate Scene
|
||||
|
||||
```http
|
||||
POST /api/scene/activate
|
||||
```
|
||||
|
||||
Activate an existing scene:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/api/scene/activate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"name": "Movie Night"}'
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes:
|
||||
|
||||
- `200` - Success
|
||||
- `400` - Bad Request
|
||||
- `401` - Unauthorized
|
||||
- `404` - Not Found
|
||||
- `500` - Server Error
|
||||
|
||||
Error responses include detailed messages:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": true,
|
||||
"message": "Device not found",
|
||||
"code": "DEVICE_NOT_FOUND",
|
||||
"details": {
|
||||
"device_id": "light.nonexistent"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
API requests are rate-limited to prevent abuse:
|
||||
|
||||
```http
|
||||
X-RateLimit-Limit: 100
|
||||
X-RateLimit-Remaining: 99
|
||||
X-RateLimit-Reset: 1640995200
|
||||
```
|
||||
|
||||
When exceeded, returns `429 Too Many Requests`:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": true,
|
||||
"message": "Rate limit exceeded",
|
||||
"reset": 1640995200
|
||||
}
|
||||
```
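A small sketch of how a client might watch these headers and pause before the window resets; the threshold of one remaining request and the wrapper name are illustrative assumptions:

```typescript
// Hypothetical wrapper: slow down when the rate-limit headers say the window is nearly used up.
async function rateLimitedFetch(url: string, init: RequestInit = {}): Promise<Response> {
    const response = await fetch(url, init);
    const remaining = response.headers.get("X-RateLimit-Remaining");
    const reset = response.headers.get("X-RateLimit-Reset"); // Unix timestamp, as shown above
    if (remaining !== null && reset !== null && Number(remaining) <= 1) {
        const waitMs = Math.max(0, Number(reset) * 1000 - Date.now());
        console.warn(`Rate limit nearly exhausted, waiting ${waitMs} ms`);
        await new Promise((resolve) => setTimeout(resolve, waitMs));
    }
    return response;
}
```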
|
||||
|
||||
## WebSocket API
|
||||
|
||||
For bi-directional communication:
|
||||
|
||||
```javascript
|
||||
const ws = new WebSocket('ws://localhost:3000/ws');
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
console.log('Received:', data);
|
||||
};
|
||||
|
||||
ws.send(JSON.stringify({
|
||||
type: 'command',
|
||||
payload: {
|
||||
command: 'Turn on lights'
|
||||
}
|
||||
}));
|
||||
```
|
||||
|
||||
## API Versioning
|
||||
|
||||
The current API version is v1. Include the version in the URL:
|
||||
|
||||
```http
|
||||
/api/v1/state
|
||||
/api/v1/command
|
||||
```
|
||||
|
||||
## Further Reading
|
||||
|
||||
- [SSE API Details](sse.md) - In-depth SSE documentation
|
||||
- [Core Functions](core.md) - Detailed endpoint documentation
|
||||
- [Architecture Overview](../architecture.md) - System design details
|
||||
- [Troubleshooting](../troubleshooting.md) - Common issues and solutions
|
||||
|
||||
# API Reference
|
||||
|
||||
The Advanced Home Assistant MCP provides several APIs for integration and automation:
|
||||
|
||||
- [Core API](core.md) - Primary interface for system control
|
||||
- [SSE API](sse.md) - Server-Sent Events for real-time updates
|
||||
- [Core Functions](core.md) - Essential system functions
|
||||
266
docs/api/sse.md
@@ -1,266 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: SSE API
|
||||
parent: API Reference
|
||||
nav_order: 2
|
||||
---
|
||||
|
||||
# Server-Sent Events (SSE) API 📡
|
||||
|
||||
The SSE API provides real-time updates about device states and events from your Home Assistant setup. This guide covers how to use and implement SSE connections in your applications.
|
||||
|
||||
## Overview
|
||||
|
||||
Server-Sent Events (SSE) is a standard that enables servers to push real-time updates to clients over HTTP connections. MCP Server uses SSE to provide:
|
||||
|
||||
- Real-time device state updates
|
||||
- Event notifications
|
||||
- System status changes
|
||||
- Command execution results
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Establishing a Connection
|
||||
|
||||
Create an EventSource connection to receive updates:
|
||||
|
||||
```javascript
|
||||
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_JWT_TOKEN');
|
||||
|
||||
eventSource.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
console.log('Received update:', data);
|
||||
};
|
||||
```
|
||||
|
||||
### Connection States
|
||||
|
||||
Handle different connection states:
|
||||
|
||||
```javascript
|
||||
eventSource.onopen = () => {
|
||||
console.log('Connection established');
|
||||
};
|
||||
|
||||
eventSource.onerror = (error) => {
|
||||
console.error('Connection error:', error);
|
||||
// Implement reconnection logic if needed
|
||||
};
|
||||
```
|
||||
|
||||
## Event Types
|
||||
|
||||
### Device State Events
|
||||
|
||||
Subscribe to all device state changes:
|
||||
|
||||
```javascript
|
||||
const stateEvents = new EventSource('http://localhost:3000/subscribe_events?type=state');
|
||||
|
||||
stateEvents.onmessage = (event) => {
|
||||
const state = JSON.parse(event.data);
|
||||
console.log('Device state changed:', state);
|
||||
};
|
||||
```
|
||||
|
||||
Example state event:
|
||||
```json
|
||||
{
|
||||
"type": "state_changed",
|
||||
"entity_id": "light.living_room",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255,
|
||||
"color_temp": 370
|
||||
},
|
||||
"timestamp": "2024-01-20T15:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Filtered Subscriptions
|
||||
|
||||
#### By Domain
|
||||
Subscribe to specific device types:
|
||||
|
||||
```javascript
|
||||
// Subscribe to only light events
|
||||
const lightEvents = new EventSource('http://localhost:3000/subscribe_events?domain=light');
|
||||
|
||||
// Subscribe to multiple domains
|
||||
const multiEvents = new EventSource('http://localhost:3000/subscribe_events?domain=light,switch,sensor');
|
||||
```
|
||||
|
||||
#### By Entity ID
|
||||
Subscribe to specific devices:
|
||||
|
||||
```javascript
|
||||
// Single entity
|
||||
const livingRoomLight = new EventSource(
|
||||
'http://localhost:3000/subscribe_events?entity_id=light.living_room'
|
||||
);
|
||||
|
||||
// Multiple entities
|
||||
const kitchenDevices = new EventSource(
|
||||
'http://localhost:3000/subscribe_events?entity_id=light.kitchen,switch.coffee_maker'
|
||||
);
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Connection Management
|
||||
|
||||
Implement robust connection handling:
|
||||
|
||||
```javascript
|
||||
class SSEManager {
|
||||
constructor(url, options = {}) {
|
||||
this.url = url;
|
||||
this.options = {
|
||||
maxRetries: 3,
|
||||
retryDelay: 1000,
|
||||
...options
|
||||
};
|
||||
this.retryCount = 0;
|
||||
this.connect();
|
||||
}
|
||||
|
||||
connect() {
|
||||
this.eventSource = new EventSource(this.url);
|
||||
|
||||
this.eventSource.onopen = () => {
|
||||
this.retryCount = 0;
|
||||
console.log('Connected to SSE stream');
|
||||
};
|
||||
|
||||
this.eventSource.onerror = (error) => {
|
||||
this.handleError(error);
|
||||
};
|
||||
|
||||
this.eventSource.onmessage = (event) => {
|
||||
this.handleMessage(event);
|
||||
};
|
||||
}
|
||||
|
||||
handleError(error) {
|
||||
console.error('SSE Error:', error);
|
||||
this.eventSource.close();
|
||||
|
||||
if (this.retryCount < this.options.maxRetries) {
|
||||
this.retryCount++;
|
||||
setTimeout(() => {
|
||||
console.log(`Retrying connection (${this.retryCount}/${this.options.maxRetries})`);
|
||||
this.connect();
|
||||
}, this.options.retryDelay * this.retryCount);
|
||||
}
|
||||
}
|
||||
|
||||
handleMessage(event) {
|
||||
try {
|
||||
const data = JSON.parse(event.data);
|
||||
// Handle the event data
|
||||
console.log('Received:', data);
|
||||
} catch (error) {
|
||||
console.error('Error parsing SSE data:', error);
|
||||
}
|
||||
}
|
||||
|
||||
disconnect() {
|
||||
if (this.eventSource) {
|
||||
this.eventSource.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Usage
|
||||
const sseManager = new SSEManager('http://localhost:3000/subscribe_events?token=YOUR_TOKEN');
|
||||
```
|
||||
|
||||
### Event Filtering
|
||||
|
||||
Filter events on the client side:
|
||||
|
||||
```javascript
|
||||
class EventFilter {
|
||||
constructor(conditions) {
|
||||
this.conditions = conditions;
|
||||
}
|
||||
|
||||
matches(event) {
|
||||
return Object.entries(this.conditions).every(([key, value]) => {
|
||||
if (Array.isArray(value)) {
|
||||
return value.includes(event[key]);
|
||||
}
|
||||
return event[key] === value;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Usage
|
||||
const filter = new EventFilter({
|
||||
domain: ['light', 'switch'],
|
||||
state: 'on'
|
||||
});
|
||||
|
||||
eventSource.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
if (filter.matches(data)) {
|
||||
console.log('Matched event:', data);
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Authentication**
|
||||
- Always include authentication tokens
|
||||
- Implement token refresh mechanisms
|
||||
- Handle authentication errors gracefully
|
||||
|
||||
2. **Error Handling**
|
||||
- Implement progressive retry logic
|
||||
- Log connection issues
|
||||
- Notify users of connection status
|
||||
|
||||
3. **Resource Management**
|
||||
- Close EventSource connections when not needed
|
||||
- Limit the number of concurrent connections
|
||||
- Use filtered subscriptions when possible
|
||||
|
||||
4. **Performance**
|
||||
- Process events efficiently
|
||||
- Batch UI updates
|
||||
- Consider debouncing frequent updates (see the sketch after this list)
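As a concrete example of the debouncing point above, the sketch below collapses a burst of SSE messages into a single UI update; the 250 ms window is an arbitrary choice:

```typescript
// Render only the last state received during each quiet period.
function debounce<T>(fn: (value: T) => void, delayMs: number): (value: T) => void {
    let timer: ReturnType<typeof setTimeout> | undefined;
    return (value: T) => {
        if (timer !== undefined) clearTimeout(timer);
        timer = setTimeout(() => fn(value), delayMs);
    };
}

const renderLatestState = debounce((state: unknown) => {
    console.log("Rendering latest state:", state);
}, 250);

const eventSource = new EventSource("http://localhost:3000/subscribe_events?token=YOUR_TOKEN");
eventSource.onmessage = (event) => renderLatestState(JSON.parse(event.data));
```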
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Connection Drops
|
||||
If the connection drops, the EventSource will automatically attempt to reconnect. You can customize this behavior:
|
||||
|
||||
```javascript
|
||||
eventSource.addEventListener('error', (error) => {
|
||||
if (eventSource.readyState === EventSource.CLOSED) {
|
||||
// Connection closed, implement custom retry logic
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Memory Leaks
|
||||
Always clean up EventSource connections:
|
||||
|
||||
```javascript
|
||||
// In a React component
|
||||
useEffect(() => {
|
||||
const eventSource = new EventSource('http://localhost:3000/subscribe_events');
|
||||
|
||||
return () => {
|
||||
eventSource.close(); // Cleanup on unmount
|
||||
};
|
||||
}, []);
|
||||
```
|
||||
|
||||
## Related Resources
|
||||
|
||||
- [API Overview](index.md)
|
||||
- [Core Functions](core.md)
|
||||
- [WebSocket API](index.md#websocket-api)
|
||||
- [Troubleshooting](../troubleshooting.md)
|
||||
@@ -1,88 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Architecture
|
||||
nav_order: 4
|
||||
---
|
||||
|
||||
# Architecture Overview 🏗️
|
||||
|
||||
This document describes the architecture of the MCP Server, explaining how different components work together to provide a bridge between Home Assistant and custom automation tools.
|
||||
|
||||
## System Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph "Client Layer"
|
||||
WC[Web Clients]
|
||||
MC[Mobile Clients]
|
||||
end
|
||||
|
||||
subgraph "MCP Server"
|
||||
API[API Gateway]
|
||||
SSE[SSE Manager]
|
||||
WS[WebSocket Server]
|
||||
CM[Command Manager]
|
||||
end
|
||||
|
||||
subgraph "Home Assistant"
|
||||
HA[Home Assistant Core]
|
||||
Dev[Devices & Services]
|
||||
end
|
||||
|
||||
WC --> |HTTP/WS| API
|
||||
MC --> |HTTP/WS| API
|
||||
|
||||
API --> |Events| SSE
|
||||
API --> |Real-time| WS
|
||||
|
||||
API --> HA
|
||||
HA --> API
|
||||
```
|
||||
|
||||
## Core Components
|
||||
|
||||
### API Gateway
|
||||
- Handles incoming HTTP and WebSocket requests
|
||||
- Provides endpoints for device management
|
||||
- Implements basic authentication and request validation
|
||||
|
||||
### SSE Manager
|
||||
- Manages Server-Sent Events for real-time updates
|
||||
- Broadcasts device state changes to connected clients
|
||||
|
||||
### WebSocket Server
|
||||
- Provides real-time, bidirectional communication
|
||||
- Supports basic device control and state monitoring
|
||||
|
||||
### Command Manager
|
||||
- Processes device control requests
|
||||
- Translates API commands to Home Assistant compatible formats (see the sketch below)
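A rough sketch of that translation step is shown below. It is not the actual Command Manager implementation; it only illustrates mapping an MCP-style command onto Home Assistant's REST service-call shape (`/api/services/{domain}/{service}` with an `entity_id` payload):

```typescript
// Sketch: derive a Home Assistant service call from an MCP-style command.
interface McpCommand {
    entity_id: string;
    action: "turn_on" | "turn_off" | "toggle";
    parameters?: Record<string, unknown>;
}

function toServiceCall(command: McpCommand) {
    const domain = command.entity_id.split(".")[0]; // e.g. "light" from "light.living_room"
    return {
        path: `/api/services/${domain}/${command.action}`,
        body: { entity_id: command.entity_id, ...command.parameters },
    };
}

// toServiceCall({ entity_id: "light.living_room", action: "turn_on", parameters: { brightness: 200 } })
// -> { path: "/api/services/light/turn_on", body: { entity_id: "light.living_room", brightness: 200 } }
```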
|
||||
|
||||
## Communication Flow
|
||||
|
||||
1. Client sends a request to the MCP Server API
|
||||
2. API Gateway authenticates the request
|
||||
3. Command Manager processes the request
|
||||
4. Request is forwarded to Home Assistant
|
||||
5. Response is sent back to the client via API or WebSocket
|
||||
|
||||
## Key Design Principles
|
||||
|
||||
- **Simplicity:** Lightweight, focused design
|
||||
- **Flexibility:** Easily extendable architecture
|
||||
- **Performance:** Efficient request handling
|
||||
- **Security:** Basic authentication and validation
|
||||
|
||||
## Limitations
|
||||
|
||||
- Basic device control capabilities
|
||||
- Limited advanced automation features
|
||||
- Minimal third-party integrations
|
||||
|
||||
## Future Improvements
|
||||
|
||||
- Enhanced error handling
|
||||
- More robust authentication
|
||||
- Expanded device type support
|
||||
|
||||
*Architecture is subject to change as the project evolves.*
|
||||
@@ -1,54 +0,0 @@
|
||||
@import "{{ site.theme }}";
|
||||
|
||||
// Custom styles
|
||||
.main-nav {
|
||||
margin-top: 20px;
|
||||
|
||||
ul {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
li {
|
||||
margin-bottom: 8px;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #267CB9;
|
||||
text-decoration: none;
|
||||
|
||||
&:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3 {
|
||||
color: #333;
|
||||
}
|
||||
|
||||
code {
|
||||
background-color: #f8f8f8;
|
||||
border: 1px solid #ddd;
|
||||
border-radius: 3px;
|
||||
padding: 2px 5px;
|
||||
}
|
||||
|
||||
pre {
|
||||
background-color: #f8f8f8;
|
||||
border: 1px solid #ddd;
|
||||
border-radius: 3px;
|
||||
padding: 10px;
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
.wrapper {
|
||||
max-width: 960px;
|
||||
}
|
||||
|
||||
section {
|
||||
max-width: 700px;
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
:root {
|
||||
--md-primary-fg-color: #1a73e8;
|
||||
--md-primary-fg-color--light: #5195ee;
|
||||
--md-primary-fg-color--dark: #0d47a1;
|
||||
}
|
||||
|
||||
.md-header {
|
||||
box-shadow: 0 0 0.2rem rgba(0,0,0,.1), 0 0.2rem 0.4rem rgba(0,0,0,.2);
|
||||
}
|
||||
|
||||
.md-main__inner {
|
||||
margin-top: 1.5rem;
|
||||
}
|
||||
|
||||
.md-typeset h1 {
|
||||
font-weight: 700;
|
||||
color: var(--md-primary-fg-color);
|
||||
}
|
||||
|
||||
.md-typeset .admonition {
|
||||
font-size: .8rem;
|
||||
}
|
||||
|
||||
code {
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
padding: .2em .4em;
|
||||
border-radius: 6px;
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
    "mcpServers": {
        "homeassistant-mcp": {
            "command": "bun",
            "args": [
                "run",
                "start",
                "--port",
                "8080"
            ],
            "env": {
                "NODE_ENV": "production"
            }
        }
    }
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
    "mcpServers": {
        "homeassistant-mcp": {
            "command": "bun",
            "args": [
                "run",
                "start",
                "--enable-cline",
                "--config",
                "${configDir}/.env"
            ],
            "env": {
                "NODE_ENV": "production",
                "CLINE_MODE": "true"
            }
        }
    }
}
|
||||
@@ -1,30 +0,0 @@
|
||||
# Configuration
|
||||
|
||||
This section covers the configuration options available in the Home Assistant MCP Server.
|
||||
|
||||
## Overview
|
||||
|
||||
The MCP Server can be configured through various configuration files and environment variables. This section will guide you through the available options and their usage.
|
||||
|
||||
## Configuration Files
|
||||
|
||||
The main configuration files are:
|
||||
|
||||
1. `.env` - Environment variables
|
||||
2. `config.yaml` - Main configuration file
|
||||
3. `devices.yaml` - Device-specific configurations
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Key environment variables that can be set:
|
||||
|
||||
- `MCP_HOST` - Host address (default: 0.0.0.0)
|
||||
- `MCP_PORT` - Port number (default: 8123)
|
||||
- `MCP_LOG_LEVEL` - Logging level (default: INFO)
|
||||
- `MCP_CONFIG_DIR` - Configuration directory path
|
||||
|
||||
## Next Steps
|
||||
|
||||
- See [System Configuration](../configuration.md) for detailed configuration options
|
||||
- Check [Environment Setup](../getting-started/configuration.md) for initial setup
|
||||
- Review [Security](../security.md) for security-related configurations
|
||||
@@ -1,270 +0,0 @@
|
||||
# System Configuration
|
||||
|
||||
This document provides detailed information about configuring the Home Assistant MCP Server.
|
||||
|
||||
## Configuration File Structure
|
||||
|
||||
The MCP Server uses environment variables for configuration, with support for different environments (development, test, production):
|
||||
|
||||
```bash
|
||||
# .env, .env.development, or .env.test
|
||||
PORT=4000
|
||||
NODE_ENV=development
|
||||
HASS_HOST=http://192.168.178.63:8123
|
||||
HASS_TOKEN=your_token_here
|
||||
JWT_SECRET=your_secret_key
|
||||
```
|
||||
|
||||
## Server Settings
|
||||
|
||||
### Basic Server Configuration
|
||||
- `PORT`: Server port number (default: 4000)
|
||||
- `NODE_ENV`: Environment mode (development, production, test)
|
||||
- `HASS_HOST`: Home Assistant instance URL
|
||||
- `HASS_TOKEN`: Home Assistant long-lived access token
|
||||
|
||||
### Security Settings
|
||||
- `JWT_SECRET`: Secret key for JWT token generation
|
||||
- `RATE_LIMIT`: Rate limiting configuration
|
||||
- `windowMs`: Time window in milliseconds (default: 15 minutes)
|
||||
- `max`: Maximum requests per window (default: 100)
|
||||
|
||||
### WebSocket Settings
|
||||
- `SSE`: Server-Sent Events configuration
|
||||
- `MAX_CLIENTS`: Maximum concurrent clients (default: 1000)
|
||||
- `PING_INTERVAL`: Keep-alive ping interval in ms (default: 30000)
|
||||
|
||||
### Speech Features (Optional)
|
||||
- `ENABLE_SPEECH_FEATURES`: Enable speech processing features (default: false)
|
||||
- `ENABLE_WAKE_WORD`: Enable wake word detection (default: false)
|
||||
- `ENABLE_SPEECH_TO_TEXT`: Enable speech-to-text conversion (default: false)
|
||||
- `WHISPER_MODEL_PATH`: Path to Whisper models directory (default: /models)
|
||||
- `WHISPER_MODEL_TYPE`: Whisper model type (default: base)
|
||||
- Available models: tiny.en, base.en, small.en, medium.en, large-v2
|
||||
|
||||
## Environment Variables
|
||||
|
||||
All configuration is managed through environment variables:
|
||||
|
||||
```bash
|
||||
# Server
|
||||
PORT=4000
|
||||
NODE_ENV=development
|
||||
|
||||
# Home Assistant
|
||||
HASS_HOST=http://your-hass-instance:8123
|
||||
HASS_TOKEN=your_token_here
|
||||
|
||||
# Security
|
||||
JWT_SECRET=your-secret-key
|
||||
|
||||
# Logging
|
||||
LOG_LEVEL=info
|
||||
LOG_DIR=logs
|
||||
LOG_MAX_SIZE=20m
|
||||
LOG_MAX_DAYS=14d
|
||||
LOG_COMPRESS=true
|
||||
LOG_REQUESTS=true
|
||||
|
||||
# Speech Features (Optional)
|
||||
ENABLE_SPEECH_FEATURES=false
|
||||
ENABLE_WAKE_WORD=false
|
||||
ENABLE_SPEECH_TO_TEXT=false
|
||||
WHISPER_MODEL_PATH=/models
|
||||
WHISPER_MODEL_TYPE=base
|
||||
```
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Security Rate Limiting
|
||||
Rate limiting is enabled by default to protect against brute force attacks:
|
||||
|
||||
```typescript
|
||||
RATE_LIMIT: {
|
||||
windowMs: 15 * 60 * 1000, // 15 minutes
|
||||
max: 100 // limit each IP to 100 requests per window
|
||||
}
|
||||
```
|
||||
|
||||
### Logging
|
||||
The server uses Bun's built-in logging capabilities with additional configuration:
|
||||
|
||||
```typescript
|
||||
LOGGING: {
|
||||
LEVEL: "info", // debug, info, warn, error
|
||||
DIR: "logs",
|
||||
MAX_SIZE: "20m",
|
||||
MAX_DAYS: "14d",
|
||||
COMPRESS: true,
|
||||
TIMESTAMP_FORMAT: "YYYY-MM-DD HH:mm:ss:ms",
|
||||
LOG_REQUESTS: true
|
||||
}
|
||||
```
|
||||
|
||||
### Speech-to-Text Configuration
|
||||
When speech features are enabled, you can configure the following options:
|
||||
|
||||
```typescript
|
||||
SPEECH: {
|
||||
ENABLED: false, // Master switch for all speech features
|
||||
WAKE_WORD_ENABLED: false, // Enable wake word detection
|
||||
SPEECH_TO_TEXT_ENABLED: false, // Enable speech-to-text
|
||||
WHISPER_MODEL_PATH: "/models", // Path to Whisper models
|
||||
WHISPER_MODEL_TYPE: "base", // Model type to use
|
||||
}
|
||||
```
|
||||
|
||||
Available Whisper models:
|
||||
- `tiny.en`: Fastest, lowest accuracy
|
||||
- `base.en`: Good balance of speed and accuracy
|
||||
- `small.en`: Better accuracy, slower
|
||||
- `medium.en`: High accuracy, much slower
|
||||
- `large-v2`: Best accuracy, very slow
|
||||
|
||||
For production deployments, we recommend using system tools like `logrotate` for log management.
|
||||
|
||||
Example logrotate configuration (`/etc/logrotate.d/mcp-server`):
|
||||
```
|
||||
/var/log/mcp-server.log {
|
||||
daily
|
||||
rotate 7
|
||||
compress
|
||||
delaycompress
|
||||
missingok
|
||||
notifempty
|
||||
create 644 mcp mcp
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Always use environment variables for sensitive information
|
||||
2. Keep .env files secure and never commit them to version control
|
||||
3. Use different environment files for development, test, and production
|
||||
4. Enable SSL/TLS in production (preferably via reverse proxy)
|
||||
5. Monitor log files for issues
|
||||
6. Regularly rotate logs in production
|
||||
7. Start with smaller Whisper models and upgrade if needed
|
||||
8. Consider GPU acceleration for larger Whisper models
|
||||
|
||||
## Validation
|
||||
|
||||
The server validates configuration on startup using Zod schemas:
|
||||
- Required fields are checked (e.g., HASS_TOKEN)
|
||||
- Value types are verified
|
||||
- Enums are validated (e.g., LOG_LEVEL, WHISPER_MODEL_TYPE)
|
||||
- Default values are applied when not specified
|
||||
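For illustration, a startup check along these lines could be written with Zod. This is a sketch only: the field names simply mirror the variables documented above, and the server's real schema may differ.

```typescript
import { z } from "zod";

// Illustrative sketch — not the server's actual schema.
const EnvSchema = z.object({
  HASS_TOKEN: z.string().min(1),                                    // required field
  PORT: z.coerce.number().default(3000),                            // default applied when unset
  LOG_LEVEL: z.enum(["debug", "info", "warn", "error"]).default("info"),
  WHISPER_MODEL_TYPE: z
    .enum(["tiny.en", "base.en", "small.en", "medium.en", "large-v2"])
    .default("base.en"),
});

// Throws a descriptive ZodError at startup if validation fails.
const config = EnvSchema.parse(process.env);
```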
|
||||
## Troubleshooting
|
||||
|
||||
Common configuration issues:
|
||||
1. Missing required environment variables
|
||||
2. Invalid environment variable values
|
||||
3. Permission issues with log directories
|
||||
4. Rate limiting too restrictive
|
||||
5. Speech model loading failures
|
||||
6. Docker not available for speech features
|
||||
7. Insufficient system resources for larger models
|
||||
|
||||
See the [Troubleshooting Guide](troubleshooting.md) for solutions.
|
||||
|
||||
# Configuration Guide
|
||||
|
||||
This document describes all available configuration options for the Home Assistant MCP Server.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### Required Settings
|
||||
|
||||
```bash
|
||||
# Server Configuration
|
||||
PORT=3000 # Server port
|
||||
HOST=localhost # Server host
|
||||
|
||||
# Home Assistant
|
||||
HASS_URL=http://localhost:8123 # Home Assistant URL
|
||||
HASS_TOKEN=your_token # Long-lived access token
|
||||
|
||||
# Security
|
||||
JWT_SECRET=your_secret # JWT signing secret
|
||||
```
|
||||
|
||||
### Optional Settings
|
||||
|
||||
```bash
|
||||
# Rate Limiting
|
||||
RATE_LIMIT_WINDOW=60000 # Time window in ms (default: 60000)
|
||||
RATE_LIMIT_MAX=100 # Max requests per window (default: 100)
|
||||
|
||||
# Logging
|
||||
LOG_LEVEL=info # debug, info, warn, error (default: info)
|
||||
LOG_DIR=logs # Log directory (default: logs)
|
||||
LOG_MAX_SIZE=10m # Max log file size (default: 10m)
|
||||
LOG_MAX_FILES=5 # Max number of log files (default: 5)
|
||||
|
||||
# WebSocket/SSE
|
||||
WS_HEARTBEAT=30000 # WebSocket heartbeat interval in ms (default: 30000)
|
||||
SSE_RETRY=3000 # SSE retry interval in ms (default: 3000)
|
||||
|
||||
# Speech Features
|
||||
ENABLE_SPEECH_FEATURES=false # Enable speech processing (default: false)
|
||||
ENABLE_WAKE_WORD=false # Enable wake word detection (default: false)
|
||||
ENABLE_SPEECH_TO_TEXT=false # Enable speech-to-text (default: false)
|
||||
|
||||
# Speech Model Configuration
|
||||
WHISPER_MODEL_PATH=/models # Path to whisper models (default: /models)
|
||||
WHISPER_MODEL_TYPE=base # Model type: tiny|base|small|medium|large-v2 (default: base)
|
||||
WHISPER_LANGUAGE=en # Primary language (default: en)
|
||||
WHISPER_TASK=transcribe # Task type: transcribe|translate (default: transcribe)
|
||||
WHISPER_DEVICE=cuda # Processing device: cpu|cuda (default: cuda if available, else cpu)
|
||||
|
||||
# Wake Word Configuration
|
||||
WAKE_WORDS=hey jarvis,ok google,alexa # Comma-separated wake words (default: hey jarvis)
|
||||
WAKE_WORD_SENSITIVITY=0.5 # Detection sensitivity 0-1 (default: 0.5)
|
||||
```
|
||||
|
||||
## Speech Features
|
||||
|
||||
### Model Selection
|
||||
|
||||
Choose a model based on your needs:
|
||||
|
||||
| Model | Size | Memory Required | Speed | Accuracy |
|
||||
|------------|-------|-----------------|-------|----------|
|
||||
| tiny.en | 75MB | 1GB | Fast | Basic |
|
||||
| base.en | 150MB | 2GB | Good | Good |
|
||||
| small.en | 500MB | 4GB | Med | Better |
|
||||
| medium.en | 1.5GB | 8GB | Slow | High |
|
||||
| large-v2 | 3GB | 16GB | Slow | Best |
|
||||
|
||||
### GPU Acceleration
|
||||
|
||||
When `WHISPER_DEVICE=cuda`:
|
||||
- NVIDIA GPU with CUDA support required
|
||||
- Significantly faster processing
|
||||
- Higher memory requirements
|
||||
|
||||
### Wake Word Detection
|
||||
|
||||
- Multiple wake words supported via comma-separated list
|
||||
- Adjustable sensitivity (0-1):
|
||||
- Lower values: Fewer false positives, may miss some triggers
|
||||
- Higher values: More responsive, may have false triggers
|
||||
- Default (0.5): Balanced detection
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. Model Selection:
|
||||
- Start with `base.en` model
|
||||
- Upgrade if better accuracy needed
|
||||
- Downgrade if performance issues
|
||||
|
||||
2. Resource Management:
|
||||
- Monitor memory usage
|
||||
- Use GPU acceleration when available
|
||||
- Consider model size vs available resources
|
||||
|
||||
3. Wake Word Configuration:
|
||||
- Use distinct wake words
|
||||
- Adjust sensitivity based on environment
|
||||
- Limit number of wake words for better performance
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Contributing
|
||||
nav_order: 5
|
||||
---
|
||||
|
||||
# Contributing Guide 🤝
|
||||
|
||||
Thank you for your interest in contributing to the MCP Server project!
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- [Bun](https://bun.sh) >= 1.0.26
|
||||
- Home Assistant instance
|
||||
- Basic understanding of TypeScript
|
||||
|
||||
### Development Setup
|
||||
|
||||
1. Fork the repository
|
||||
2. Clone your fork:
|
||||
```bash
|
||||
git clone https://github.com/YOUR_USERNAME/homeassistant-mcp.git
|
||||
cd homeassistant-mcp
|
||||
```
|
||||
|
||||
3. Install dependencies:
|
||||
```bash
|
||||
bun install
|
||||
```
|
||||
|
||||
4. Configure environment:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Edit .env with your Home Assistant details
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
### Branch Naming
|
||||
|
||||
- `feature/` - New features
|
||||
- `fix/` - Bug fixes
|
||||
- `docs/` - Documentation updates
|
||||
|
||||
Example:
|
||||
```bash
|
||||
git checkout -b feature/device-control-improvements
|
||||
```
|
||||
|
||||
### Commit Messages
|
||||
|
||||
Follow simple, clear commit messages:
|
||||
|
||||
```
|
||||
type: brief description
|
||||
|
||||
[optional detailed explanation]
|
||||
```
|
||||
|
||||
Types:
|
||||
- `feat:` - New feature
|
||||
- `fix:` - Bug fix
|
||||
- `docs:` - Documentation
|
||||
- `chore:` - Maintenance
|
||||
|
||||
### Code Style
|
||||
|
||||
- Use TypeScript
|
||||
- Follow existing code structure
|
||||
- Keep changes focused and minimal
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests before submitting:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
bun test
|
||||
|
||||
# Run specific test
|
||||
bun test test/api/control.test.ts
|
||||
```
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
1. Ensure tests pass
|
||||
2. Update documentation if needed
|
||||
3. Provide clear description of changes
|
||||
|
||||
### PR Template
|
||||
|
||||
```markdown
|
||||
## Description
|
||||
Brief explanation of the changes
|
||||
|
||||
## Type of Change
|
||||
- [ ] Bug fix
|
||||
- [ ] New feature
|
||||
- [ ] Documentation update
|
||||
|
||||
## Testing
|
||||
Describe how you tested these changes
|
||||
```
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
- Use GitHub Issues
|
||||
- Provide clear, reproducible steps
|
||||
- Include environment details
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
- Be respectful
|
||||
- Focus on constructive feedback
|
||||
- Help maintain a positive environment
|
||||
|
||||
## Resources
|
||||
|
||||
- [API Documentation](api.md)
|
||||
- [Troubleshooting Guide](troubleshooting.md)
|
||||
|
||||
*Thank you for contributing!*
|
||||
@@ -1,141 +0,0 @@
|
||||
# Deployment Guide
|
||||
|
||||
This documentation is automatically deployed to GitHub Pages using GitHub Actions. Here's how it works and how to manage deployments.
|
||||
|
||||
## Automatic Deployment
|
||||
|
||||
The documentation is automatically deployed when changes are pushed to the `main` or `master` branch. The deployment process:
|
||||
|
||||
1. Triggers on push to main/master
|
||||
2. Sets up Python environment
|
||||
3. Installs required dependencies
|
||||
4. Builds the documentation
|
||||
5. Deploys to the `gh-pages` branch
|
||||
|
||||
### GitHub Actions Workflow
|
||||
|
||||
The deployment is handled by the workflow in `.github/workflows/deploy-docs.yml`. This is the single source of truth for documentation deployment:
|
||||
|
||||
```yaml
|
||||
name: Deploy MkDocs
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
workflow_dispatch: # Allow manual trigger
|
||||
```
|
||||
|
||||
## Manual Deployment
|
||||
|
||||
If needed, you can deploy manually using:
|
||||
|
||||
```bash
|
||||
# Create a virtual environment
|
||||
python -m venv venv
|
||||
|
||||
# Activate the virtual environment
|
||||
source venv/bin/activate
|
||||
|
||||
# Install dependencies
|
||||
pip install -r docs/requirements.txt
|
||||
|
||||
# Build the documentation
|
||||
mkdocs build
|
||||
|
||||
# Deploy to GitHub Pages
|
||||
mkdocs gh-deploy --force
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Documentation Updates
|
||||
- Test locally before pushing: `mkdocs serve`
|
||||
- Verify all links work
|
||||
- Ensure images are optimized
|
||||
- Check mobile responsiveness
|
||||
|
||||
### 2. Version Control
|
||||
- Keep documentation in sync with code versions
|
||||
- Use meaningful commit messages
|
||||
- Tag important documentation versions
|
||||
|
||||
### 3. Content Guidelines
|
||||
- Use consistent formatting
|
||||
- Keep navigation structure logical
|
||||
- Include examples where appropriate
|
||||
- Maintain up-to-date screenshots
|
||||
|
||||
### 4. Maintenance
|
||||
- Regularly review and update content
|
||||
- Check for broken links
|
||||
- Update dependencies
|
||||
- Monitor GitHub Actions logs
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Failed Deployments**
|
||||
- Check GitHub Actions logs
|
||||
- Verify dependencies are up to date
|
||||
- Ensure all required files exist
|
||||
|
||||
2. **Broken Links**
|
||||
- Run `mkdocs build --strict`
|
||||
- Use relative paths in markdown
|
||||
- Check case sensitivity
|
||||
|
||||
3. **Style Issues**
|
||||
- Verify theme configuration
|
||||
- Check CSS customizations
|
||||
- Test on multiple browsers
|
||||
|
||||
## Configuration Files
|
||||
|
||||
### requirements.txt
|
||||
|
||||
Create a requirements file for documentation dependencies:
|
||||
|
||||
```txt
|
||||
mkdocs-material
|
||||
mkdocs-minify-plugin
|
||||
mkdocs-git-revision-date-plugin
|
||||
mkdocs-mkdocstrings
|
||||
mkdocs-social-plugin
|
||||
mkdocs-redirects
|
||||
```
|
||||
|
||||
## Monitoring
|
||||
|
||||
- Check [GitHub Pages settings](https://github.com/jango-blockchained/advanced-homeassistant-mcp/settings/pages)
|
||||
- Monitor build status in Actions tab
|
||||
- Verify site accessibility
|
||||
|
||||
## Workflow Features
|
||||
|
||||
### Caching
|
||||
The workflow implements caching for Python dependencies to speed up deployments:
|
||||
- Pip cache for Python packages
|
||||
- MkDocs dependencies cache
|
||||
|
||||
### Deployment Checks
|
||||
Several checks are performed during deployment:
|
||||
1. Link validation with `mkdocs build --strict`
|
||||
2. Build verification
|
||||
3. Post-deployment site accessibility check
|
||||
|
||||
### Manual Triggers
|
||||
You can manually trigger deployments using the "workflow_dispatch" event in GitHub Actions.
|
||||
|
||||
## Cleanup
|
||||
|
||||
To clean up duplicate workflow files, run:
|
||||
|
||||
```bash
|
||||
# Make the script executable
|
||||
chmod +x scripts/cleanup-workflows.sh
|
||||
|
||||
# Run the cleanup script
|
||||
./scripts/cleanup-workflows.sh
|
||||
```
|
||||
@@ -1,310 +0,0 @@
|
||||
# Development Best Practices
|
||||
|
||||
This guide outlines the best practices for developing tools and features for the Home Assistant MCP.
|
||||
|
||||
## Code Style
|
||||
|
||||
### TypeScript
|
||||
|
||||
1. Use TypeScript for all new code
|
||||
2. Enable strict mode
|
||||
3. Use explicit types
|
||||
4. Avoid `any` type
|
||||
5. Use interfaces over types
|
||||
6. Document with JSDoc comments
|
||||
|
||||
```typescript
|
||||
/**
|
||||
* Represents a device in the system.
|
||||
* @interface
|
||||
*/
|
||||
interface Device {
|
||||
/** Unique device identifier */
|
||||
id: string;
|
||||
|
||||
/** Human-readable device name */
|
||||
name: string;
|
||||
|
||||
/** Device state */
|
||||
state: DeviceState;
|
||||
}
|
||||
```
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
1. Use PascalCase for:
|
||||
- Classes
|
||||
- Interfaces
|
||||
- Types
|
||||
- Enums
|
||||
|
||||
2. Use camelCase for:
|
||||
- Variables
|
||||
- Functions
|
||||
- Methods
|
||||
- Properties
|
||||
|
||||
3. Use UPPER_SNAKE_CASE for:
|
||||
- Constants
|
||||
- Enum values
|
||||
|
||||
```typescript
|
||||
class DeviceManager {
|
||||
private readonly DEFAULT_TIMEOUT = 5000;
|
||||
|
||||
async getDeviceState(deviceId: string): Promise<DeviceState> {
|
||||
// Implementation
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### SOLID Principles
|
||||
|
||||
1. Single Responsibility
|
||||
- Each class/module has one job
|
||||
- Split complex functionality
|
||||
|
||||
2. Open/Closed
|
||||
- Open for extension
|
||||
- Closed for modification
|
||||
|
||||
3. Liskov Substitution
|
||||
- Subtypes must be substitutable
|
||||
- Use interfaces properly
|
||||
|
||||
4. Interface Segregation
|
||||
- Keep interfaces focused
|
||||
- Split large interfaces
|
||||
|
||||
5. Dependency Inversion
|
||||
- Depend on abstractions
|
||||
- Use dependency injection
|
||||
|
||||
### Example
|
||||
|
||||
```typescript
|
||||
// Bad
|
||||
class DeviceManager {
|
||||
async getState() { /* ... */ }
|
||||
async setState() { /* ... */ }
|
||||
async sendNotification() { /* ... */ } // Wrong responsibility
|
||||
}
|
||||
|
||||
// Good
|
||||
class DeviceManager {
|
||||
constructor(
|
||||
private notifier: NotificationService
|
||||
) {}
|
||||
|
||||
async getState() { /* ... */ }
|
||||
async setState() { /* ... */ }
|
||||
}
|
||||
|
||||
class NotificationService {
|
||||
async send() { /* ... */ }
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. Use custom error classes
|
||||
2. Include error codes
|
||||
3. Provide meaningful messages
|
||||
4. Include error context
|
||||
5. Handle async errors
|
||||
6. Log appropriately
|
||||
|
||||
```typescript
|
||||
class DeviceError extends Error {
|
||||
constructor(
|
||||
message: string,
|
||||
public code: string,
|
||||
public context: Record<string, any>
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'DeviceError';
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await device.connect();
|
||||
} catch (error) {
|
||||
throw new DeviceError(
|
||||
'Failed to connect to device',
|
||||
'DEVICE_CONNECTION_ERROR',
|
||||
{ deviceId: device.id, attempt: 1 }
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
### Guidelines
|
||||
|
||||
1. Write unit tests first
|
||||
2. Use meaningful descriptions
|
||||
3. Test edge cases
|
||||
4. Mock external dependencies
|
||||
5. Keep tests focused
|
||||
6. Use test fixtures
|
||||
|
||||
```typescript
|
||||
describe('DeviceManager', () => {
|
||||
let manager: DeviceManager;
|
||||
let mockDevice: jest.Mocked<Device>;
|
||||
|
||||
beforeEach(() => {
|
||||
mockDevice = {
|
||||
id: 'test_device',
|
||||
getState: jest.fn()
|
||||
};
|
||||
manager = new DeviceManager(mockDevice);
|
||||
});
|
||||
|
||||
it('should get device state', async () => {
|
||||
mockDevice.getState.mockResolvedValue('on');
|
||||
const state = await manager.getDeviceState('test_device');
|
||||
expect(state).toBe('on');
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
### Optimization
|
||||
|
||||
1. Use caching
|
||||
2. Implement pagination
|
||||
3. Optimize database queries
|
||||
4. Use connection pooling
|
||||
5. Implement rate limiting
|
||||
6. Batch operations
|
||||
|
||||
```typescript
|
||||
class DeviceCache {
|
||||
private cache = new Map<string, CacheEntry>();
|
||||
private readonly TTL = 60000; // 1 minute
|
||||
|
||||
async getDevice(id: string): Promise<Device> {
|
||||
const cached = this.cache.get(id);
|
||||
if (cached && Date.now() - cached.timestamp < this.TTL) {
|
||||
return cached.device;
|
||||
}
|
||||
|
||||
const device = await this.fetchDevice(id);
|
||||
this.cache.set(id, {
|
||||
device,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
return device;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Security
|
||||
|
||||
### Guidelines
|
||||
|
||||
1. Validate all input
|
||||
2. Use parameterized queries
|
||||
3. Implement rate limiting
|
||||
4. Use proper authentication
|
||||
5. Follow OWASP guidelines
|
||||
6. Sanitize output
|
||||
|
||||
```typescript
|
||||
class InputValidator {
|
||||
static validateDeviceId(id: string): boolean {
|
||||
return /^[a-zA-Z0-9_-]{1,64}$/.test(id);
|
||||
}
|
||||
|
||||
static sanitizeOutput(data: any): any {
|
||||
// Implement output sanitization
|
||||
return data;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
### Standards
|
||||
|
||||
1. Use JSDoc comments
|
||||
2. Document interfaces
|
||||
3. Include examples
|
||||
4. Document errors
|
||||
5. Keep docs updated
|
||||
6. Use markdown
|
||||
|
||||
```typescript
|
||||
/**
|
||||
* Manages device operations.
|
||||
* @class
|
||||
*/
|
||||
class DeviceManager {
|
||||
/**
|
||||
* Gets the current state of a device.
|
||||
* @param {string} deviceId - The device identifier.
|
||||
* @returns {Promise<DeviceState>} The current device state.
|
||||
* @throws {DeviceError} If device is not found or unavailable.
|
||||
* @example
|
||||
* const state = await deviceManager.getDeviceState('living_room_light');
|
||||
*/
|
||||
async getDeviceState(deviceId: string): Promise<DeviceState> {
|
||||
// Implementation
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Logging
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. Use appropriate levels
|
||||
2. Include context
|
||||
3. Structure log data
|
||||
4. Handle sensitive data
|
||||
5. Implement rotation
|
||||
6. Use correlation IDs
|
||||
|
||||
```typescript
|
||||
class Logger {
|
||||
info(message: string, context: Record<string, any>) {
|
||||
console.log(JSON.stringify({
|
||||
level: 'info',
|
||||
message,
|
||||
context,
|
||||
timestamp: new Date().toISOString(),
|
||||
correlationId: context.correlationId
|
||||
}));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Version Control
|
||||
|
||||
### Guidelines
|
||||
|
||||
1. Use meaningful commits
|
||||
2. Follow branching strategy
|
||||
3. Write good PR descriptions
|
||||
4. Review code thoroughly
|
||||
5. Keep changes focused
|
||||
6. Use conventional commits
|
||||
|
||||
```bash
|
||||
# Good commit messages
|
||||
git commit -m "feat(device): add support for zigbee devices"
|
||||
git commit -m "fix(api): handle timeout errors properly"
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [Tool Development Guide](tools.md)
|
||||
- [Interface Documentation](interfaces.md)
|
||||
- [Testing Guide](../testing.md)
|
||||
@@ -1,197 +0,0 @@
|
||||
# Development Environment Setup
|
||||
|
||||
This guide will help you set up your development environment for the Home Assistant MCP Server.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Required Software
|
||||
- Python 3.10 or higher
|
||||
- pip (Python package manager)
|
||||
- git
|
||||
- Docker (optional, for containerized development)
|
||||
- Node.js 18+ (for frontend development)
|
||||
|
||||
### System Requirements
|
||||
- 4GB RAM minimum
|
||||
- 2 CPU cores minimum
|
||||
- 10GB free disk space
|
||||
|
||||
## Initial Setup
|
||||
|
||||
1. Clone the Repository
|
||||
```bash
|
||||
git clone https://github.com/jango-blockchained/homeassistant-mcp.git
|
||||
cd homeassistant-mcp
|
||||
```
|
||||
|
||||
2. Create Virtual Environment
|
||||
```bash
|
||||
python -m venv .venv
|
||||
source .venv/bin/activate # Linux/macOS
|
||||
# or
|
||||
.venv\Scripts\activate # Windows
|
||||
```
|
||||
|
||||
3. Install Dependencies
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install -r docs/requirements.txt # for documentation
|
||||
```
|
||||
|
||||
## Development Tools
|
||||
|
||||
### Code Editor Setup
|
||||
We recommend using Visual Studio Code with these extensions:
|
||||
- Python
|
||||
- Docker
|
||||
- YAML
|
||||
- ESLint
|
||||
- Prettier
|
||||
|
||||
### VS Code Settings
|
||||
```json
|
||||
{
|
||||
"python.linting.enabled": true,
|
||||
"python.linting.pylintEnabled": true,
|
||||
"python.formatting.provider": "black",
|
||||
"editor.formatOnSave": true
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
1. Create Local Config
|
||||
```bash
|
||||
cp config.example.yaml config.yaml
|
||||
```
|
||||
|
||||
2. Set Environment Variables
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Edit .env with your settings
|
||||
```
|
||||
|
||||
## Running Tests
|
||||
|
||||
### Unit Tests
|
||||
```bash
|
||||
pytest tests/unit
|
||||
```
|
||||
|
||||
### Integration Tests
|
||||
```bash
|
||||
pytest tests/integration
|
||||
```
|
||||
|
||||
### Coverage Report
|
||||
```bash
|
||||
pytest --cov=src tests/
|
||||
```
|
||||
|
||||
## Docker Development
|
||||
|
||||
### Build Container
|
||||
```bash
|
||||
docker build -t mcp-server-dev -f Dockerfile.dev .
|
||||
```
|
||||
|
||||
### Run Development Container
|
||||
```bash
|
||||
docker run -it --rm \
|
||||
-v $(pwd):/app \
|
||||
-p 8123:8123 \
|
||||
mcp-server-dev
|
||||
```
|
||||
|
||||
## Database Setup
|
||||
|
||||
### Local Development Database
|
||||
```bash
|
||||
docker run -d \
|
||||
-p 5432:5432 \
|
||||
-e POSTGRES_USER=mcp \
|
||||
-e POSTGRES_PASSWORD=development \
|
||||
-e POSTGRES_DB=mcp_dev \
|
||||
postgres:14
|
||||
```
|
||||
|
||||
### Run Migrations
|
||||
```bash
|
||||
alembic upgrade head
|
||||
```
|
||||
|
||||
## Frontend Development
|
||||
|
||||
1. Install Node.js Dependencies
|
||||
```bash
|
||||
cd frontend
|
||||
npm install
|
||||
```
|
||||
|
||||
2. Start Development Server
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
### Build Documentation
|
||||
```bash
|
||||
mkdocs serve
|
||||
```
|
||||
|
||||
### View Documentation
|
||||
Open http://localhost:8000 in your browser
|
||||
|
||||
## Debugging
|
||||
|
||||
### VS Code Launch Configuration
|
||||
```json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Python: MCP Server",
|
||||
"type": "python",
|
||||
"request": "launch",
|
||||
"program": "src/main.py",
|
||||
"console": "integratedTerminal"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Git Hooks
|
||||
|
||||
### Install Pre-commit
|
||||
```bash
|
||||
pip install pre-commit
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
### Available Hooks
|
||||
- black (code formatting)
|
||||
- flake8 (linting)
|
||||
- isort (import sorting)
|
||||
- mypy (type checking)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Common Issues:
|
||||
1. Port already in use
|
||||
- Check for running processes: `lsof -i :8123`
|
||||
- Kill process if needed: `kill -9 PID`
|
||||
|
||||
2. Database connection issues
|
||||
- Verify PostgreSQL is running
|
||||
- Check connection settings in .env
|
||||
|
||||
3. Virtual environment problems
|
||||
- Delete and recreate: `rm -rf .venv && python -m venv .venv`
|
||||
- Reinstall dependencies
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Review the [Architecture Guide](../architecture.md)
|
||||
2. Check [Contributing Guidelines](../contributing.md)
|
||||
3. Start with [Simple Issues](https://github.com/jango-blockchained/homeassistant-mcp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
|
||||
@@ -1,54 +0,0 @@
|
||||
# Development Guide
|
||||
|
||||
Welcome to the development guide for the Home Assistant MCP Server. This section provides comprehensive information for developers who want to contribute to or extend the project.
|
||||
|
||||
## Development Overview
|
||||
|
||||
The MCP Server is built with modern development practices in mind, focusing on:
|
||||
|
||||
- Clean, maintainable code
|
||||
- Comprehensive testing
|
||||
- Clear documentation
|
||||
- Modular architecture
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. Set up your development environment
|
||||
2. Fork the repository
|
||||
3. Install dependencies
|
||||
4. Run tests
|
||||
5. Make your changes
|
||||
6. Submit a pull request
|
||||
|
||||
## Development Topics
|
||||
|
||||
- [Architecture](../architecture.md) - System architecture and design
|
||||
- [Contributing](../contributing.md) - Contribution guidelines
|
||||
- [Testing](../testing.md) - Testing framework and guidelines
|
||||
- [Troubleshooting](../troubleshooting.md) - Common issues and solutions
|
||||
- [Deployment](../deployment.md) - Deployment procedures
|
||||
- [Roadmap](../roadmap.md) - Future development plans
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Follow the coding style guide
|
||||
- Write comprehensive tests
|
||||
- Document your changes
|
||||
- Keep commits atomic
|
||||
- Use meaningful commit messages
|
||||
|
||||
## Development Workflow
|
||||
|
||||
1. Create a feature branch
|
||||
2. Make your changes
|
||||
3. Run tests
|
||||
4. Update documentation
|
||||
5. Submit a pull request
|
||||
6. Address review comments
|
||||
7. Merge when approved
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Review the [Architecture](../architecture.md)
|
||||
- Check [Contributing Guidelines](../contributing.md)
|
||||
- Set up your [Development Environment](environment.md)
|
||||
@@ -1,296 +0,0 @@
|
||||
# Interface Documentation
|
||||
|
||||
This document describes the core interfaces used throughout the Home Assistant MCP.
|
||||
|
||||
## Core Interfaces
|
||||
|
||||
### Tool Interface
|
||||
|
||||
```typescript
|
||||
interface Tool {
|
||||
/** Unique identifier for the tool */
|
||||
id: string;
|
||||
|
||||
/** Human-readable name */
|
||||
name: string;
|
||||
|
||||
/** Detailed description */
|
||||
description: string;
|
||||
|
||||
/** Semantic version */
|
||||
version: string;
|
||||
|
||||
/** Tool category */
|
||||
category: ToolCategory;
|
||||
|
||||
/** Execute tool functionality */
|
||||
execute(params: any): Promise<ToolResult>;
|
||||
}
|
||||
```
|
||||
|
||||
### Tool Result
|
||||
|
||||
```typescript
|
||||
interface ToolResult {
|
||||
/** Operation success status */
|
||||
success: boolean;
|
||||
|
||||
/** Response data */
|
||||
data?: any;
|
||||
|
||||
/** Error message if failed */
|
||||
message?: string;
|
||||
|
||||
/** Error code if failed */
|
||||
error_code?: string;
|
||||
}
|
||||
```
|
||||
|
||||
### Tool Category
|
||||
|
||||
```typescript
|
||||
enum ToolCategory {
|
||||
DeviceManagement = 'device_management',
|
||||
HistoryState = 'history_state',
|
||||
Automation = 'automation',
|
||||
AddonsPackages = 'addons_packages',
|
||||
Notifications = 'notifications',
|
||||
Events = 'events',
|
||||
Utility = 'utility'
|
||||
}
|
||||
```
|
||||
|
||||
## Event Interfaces
|
||||
|
||||
### Event Subscription
|
||||
|
||||
```typescript
|
||||
interface EventSubscription {
|
||||
/** Unique subscription ID */
|
||||
id: string;
|
||||
|
||||
/** Event type to subscribe to */
|
||||
event_type: string;
|
||||
|
||||
/** Optional entity ID filter */
|
||||
entity_id?: string;
|
||||
|
||||
/** Optional domain filter */
|
||||
domain?: string;
|
||||
|
||||
/** Subscription creation timestamp */
|
||||
created_at: string;
|
||||
|
||||
/** Last event timestamp */
|
||||
last_event?: string;
|
||||
}
|
||||
```
|
||||
|
||||
### Event Message
|
||||
|
||||
```typescript
|
||||
interface EventMessage {
|
||||
/** Event type */
|
||||
event_type: string;
|
||||
|
||||
/** Entity ID if applicable */
|
||||
entity_id?: string;
|
||||
|
||||
/** Event data */
|
||||
data: any;
|
||||
|
||||
/** Event origin */
|
||||
origin: 'LOCAL' | 'REMOTE';
|
||||
|
||||
/** Event timestamp */
|
||||
time_fired: string;
|
||||
|
||||
/** Event context */
|
||||
context: EventContext;
|
||||
}
|
||||
```
|
||||
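As an illustration, a consumer of `EventMessage` might look like the following sketch (the `state_changed` event type is just an example value):

```typescript
function handleEvent(msg: EventMessage): void {
  // React only to state changes that carry an entity ID
  if (msg.event_type === 'state_changed' && msg.entity_id) {
    console.log(`[${msg.time_fired}] ${msg.entity_id} changed`, msg.data);
  }
}
```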
|
||||
## Device Interfaces
|
||||
|
||||
### Device
|
||||
|
||||
```typescript
|
||||
interface Device {
|
||||
/** Device ID */
|
||||
id: string;
|
||||
|
||||
/** Device name */
|
||||
name: string;
|
||||
|
||||
/** Device domain */
|
||||
domain: string;
|
||||
|
||||
/** Current state */
|
||||
state: string;
|
||||
|
||||
/** Device attributes */
|
||||
attributes: Record<string, any>;
|
||||
|
||||
/** Device capabilities */
|
||||
capabilities: DeviceCapabilities;
|
||||
}
|
||||
```
|
||||
|
||||
### Device Capabilities
|
||||
|
||||
```typescript
|
||||
interface DeviceCapabilities {
|
||||
/** Supported features */
|
||||
features: string[];
|
||||
|
||||
/** Supported commands */
|
||||
commands: string[];
|
||||
|
||||
/** State attributes */
|
||||
attributes: {
|
||||
/** Attribute name */
|
||||
[key: string]: {
|
||||
/** Attribute type */
|
||||
type: 'string' | 'number' | 'boolean' | 'object';
|
||||
/** Attribute description */
|
||||
description: string;
|
||||
/** Optional value constraints */
|
||||
constraints?: {
|
||||
min?: number;
|
||||
max?: number;
|
||||
enum?: any[];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
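For example, a dimmable light could advertise capabilities like the following (the values are illustrative, not taken from a real device registry):

```typescript
const lightCapabilities: DeviceCapabilities = {
  features: ['brightness', 'color_temp'],
  commands: ['turn_on', 'turn_off', 'toggle'],
  attributes: {
    brightness: {
      type: 'number',
      description: 'Current brightness level',
      constraints: { min: 0, max: 255 }
    }
  }
};
```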
|
||||
## Authentication Interfaces
|
||||
|
||||
### Auth Token
|
||||
|
||||
```typescript
|
||||
interface AuthToken {
|
||||
/** Token value */
|
||||
token: string;
|
||||
|
||||
/** Token type */
|
||||
type: 'bearer' | 'jwt';
|
||||
|
||||
/** Expiration timestamp */
|
||||
expires_at: string;
|
||||
|
||||
/** Token refresh info */
|
||||
refresh?: {
|
||||
token: string;
|
||||
expires_at: string;
|
||||
};
|
||||
}
|
||||
```
|
||||
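A minimal expiry check against this interface could look like this sketch:

```typescript
function isTokenExpired(token: AuthToken): boolean {
  // Assumes expires_at is an ISO-8601 timestamp string
  return Date.now() >= Date.parse(token.expires_at);
}
```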
|
||||
### User
|
||||
|
||||
```typescript
|
||||
interface User {
|
||||
/** User ID */
|
||||
id: string;
|
||||
|
||||
/** Username */
|
||||
username: string;
|
||||
|
||||
/** User type */
|
||||
type: 'admin' | 'user' | 'service';
|
||||
|
||||
/** User permissions */
|
||||
permissions: string[];
|
||||
}
|
||||
```
|
||||
|
||||
## Error Interfaces
|
||||
|
||||
### Tool Error
|
||||
|
||||
```typescript
|
||||
interface ToolError extends Error {
|
||||
/** Error code */
|
||||
code: string;
|
||||
|
||||
/** HTTP status code */
|
||||
status: number;
|
||||
|
||||
/** Error details */
|
||||
details?: Record<string, any>;
|
||||
}
|
||||
```
|
||||
|
||||
### Validation Error
|
||||
|
||||
```typescript
|
||||
interface ValidationError {
|
||||
/** Error path */
|
||||
path: string;
|
||||
|
||||
/** Error message */
|
||||
message: string;
|
||||
|
||||
/** Error code */
|
||||
code: string;
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Interfaces
|
||||
|
||||
### Tool Configuration
|
||||
|
||||
```typescript
|
||||
interface ToolConfig {
|
||||
/** Enable/disable tool */
|
||||
enabled: boolean;
|
||||
|
||||
/** Tool-specific settings */
|
||||
settings: Record<string, any>;
|
||||
|
||||
/** Rate limiting */
|
||||
rate_limit?: {
|
||||
/** Max requests */
|
||||
max: number;
|
||||
/** Time window in seconds */
|
||||
window: number;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### System Configuration
|
||||
|
||||
```typescript
|
||||
interface SystemConfig {
|
||||
/** System name */
|
||||
name: string;
|
||||
|
||||
/** Environment */
|
||||
environment: 'development' | 'production';
|
||||
|
||||
/** Log level */
|
||||
log_level: 'debug' | 'info' | 'warn' | 'error';
|
||||
|
||||
/** Tool configurations */
|
||||
tools: Record<string, ToolConfig>;
|
||||
}
|
||||
```
|
||||
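Putting `ToolConfig` and `SystemConfig` together, a minimal configuration object could look like this (the tool name is illustrative):

```typescript
const config: SystemConfig = {
  name: 'home-assistant-mcp',
  environment: 'development',
  log_level: 'debug',
  tools: {
    my_custom_tool: {
      enabled: true,
      settings: {},
      rate_limit: { max: 100, window: 60 } // 100 requests per 60 seconds
    }
  }
};
```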
|
||||
## Best Practices
|
||||
|
||||
1. Use TypeScript for all interfaces
|
||||
2. Include JSDoc comments
|
||||
3. Use strict typing
|
||||
4. Keep interfaces focused
|
||||
5. Use consistent naming
|
||||
6. Document constraints
|
||||
7. Version interfaces
|
||||
8. Include examples
|
||||
|
||||
## See Also
|
||||
|
||||
- [Tool Development Guide](tools.md)
|
||||
- [Best Practices](best-practices.md)
|
||||
- [Testing Guide](../testing.md)
|
||||
@@ -1,323 +0,0 @@
|
||||
# Migrating Tests from Jest to Bun
|
||||
|
||||
This guide provides instructions for migrating test files from Jest to Bun's test framework.
|
||||
|
||||
## Table of Contents
|
||||
- [Basic Setup](#basic-setup)
|
||||
- [Import Changes](#import-changes)
|
||||
- [API Changes](#api-changes)
|
||||
- [Mocking](#mocking)
|
||||
- [Common Patterns](#common-patterns)
|
||||
- [Examples](#examples)
|
||||
|
||||
## Basic Setup
|
||||
|
||||
1. Remove Jest-related dependencies from `package.json`:
|
||||
```json
|
||||
{
|
||||
"devDependencies": {
|
||||
"@jest/globals": "...",
|
||||
"jest": "...",
|
||||
"ts-jest": "..."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. Remove Jest configuration files:
|
||||
- `jest.config.js`
|
||||
- `jest.setup.js`
|
||||
|
||||
3. Update test scripts in `package.json`:
|
||||
```json
|
||||
{
|
||||
"scripts": {
|
||||
"test": "bun test",
|
||||
"test:watch": "bun test --watch",
|
||||
"test:coverage": "bun test --coverage"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Import Changes
|
||||
|
||||
### Before (Jest):
|
||||
```typescript
|
||||
import { jest, describe, it, expect, beforeEach, afterEach } from '@jest/globals';
|
||||
```
|
||||
|
||||
### After (Bun):
|
||||
```typescript
|
||||
import { describe, expect, test, beforeEach, afterEach, mock } from "bun:test";
|
||||
import type { Mock } from "bun:test";
|
||||
```
|
||||
|
||||
Note: `it` is replaced with `test` in Bun.
|
||||
|
||||
## API Changes
|
||||
|
||||
### Test Structure
|
||||
```typescript
|
||||
// Jest
|
||||
describe('Suite', () => {
|
||||
it('should do something', () => {
|
||||
// test
|
||||
});
|
||||
});
|
||||
|
||||
// Bun
|
||||
describe('Suite', () => {
|
||||
test('should do something', () => {
|
||||
// test
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Assertions
|
||||
Most Jest assertions work the same in Bun:
|
||||
|
||||
```typescript
|
||||
// These work the same in both:
|
||||
expect(value).toBe(expected);
|
||||
expect(value).toEqual(expected);
|
||||
expect(value).toBeDefined();
|
||||
expect(value).toBeUndefined();
|
||||
expect(value).toBeTruthy();
|
||||
expect(value).toBeFalsy();
|
||||
expect(array).toContain(item);
|
||||
expect(value).toBeInstanceOf(Class);
|
||||
expect(spy).toHaveBeenCalled();
|
||||
expect(spy).toHaveBeenCalledWith(...args);
|
||||
```
|
||||
|
||||
## Mocking
|
||||
|
||||
### Function Mocking
|
||||
|
||||
#### Before (Jest):
|
||||
```typescript
|
||||
const mockFn = jest.fn();
|
||||
mockFn.mockImplementation(() => 'result');
|
||||
mockFn.mockResolvedValue('result');
|
||||
mockFn.mockRejectedValue(new Error());
|
||||
```
|
||||
|
||||
#### After (Bun):
|
||||
```typescript
|
||||
const mockFn = mock(() => 'result');
|
||||
const mockAsyncFn = mock(() => Promise.resolve('result'));
|
||||
const mockErrorFn = mock(() => Promise.reject(new Error()));
|
||||
```
|
||||
|
||||
### Module Mocking
|
||||
|
||||
#### Before (Jest):
|
||||
```typescript
|
||||
jest.mock('module-name', () => ({
|
||||
default: jest.fn(),
|
||||
namedExport: jest.fn()
|
||||
}));
|
||||
```
|
||||
|
||||
#### After (Bun):
|
||||
```typescript
|
||||
// Option 1: Using Bun's mock.module (available in recent Bun versions)
mock.module('module-name', () => ({
default: mock(() => {}),
namedExport: mock(() => {})
}));
|
||||
|
||||
// Option 2: Using dynamic imports
|
||||
const mockModule = {
|
||||
default: mock(() => {}),
|
||||
namedExport: mock(() => {})
|
||||
};
|
||||
```
|
||||
|
||||
### Mock Reset/Clear
|
||||
|
||||
#### Before (Jest):
|
||||
```typescript
|
||||
jest.clearAllMocks();
|
||||
mockFn.mockClear();
|
||||
jest.resetModules();
|
||||
```
|
||||
|
||||
#### After (Bun):
|
||||
```typescript
|
||||
mockFn.mockReset();
|
||||
// or for specific calls
|
||||
mockFn.mock.calls = [];
|
||||
```
|
||||
|
||||
### Spy on Methods
|
||||
|
||||
#### Before (Jest):
|
||||
```typescript
|
||||
jest.spyOn(object, 'method');
|
||||
```
|
||||
|
||||
#### After (Bun):
|
||||
```typescript
|
||||
const originalMethod = object.method.bind(object);
const spy = mock((...args) => originalMethod(...args));
object.method = spy;
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Async Tests
|
||||
```typescript
|
||||
// Works the same in both Jest and Bun:
|
||||
test('async test', async () => {
|
||||
const result = await someAsyncFunction();
|
||||
expect(result).toBe(expected);
|
||||
});
|
||||
```
|
||||
|
||||
### Setup and Teardown
|
||||
```typescript
|
||||
describe('Suite', () => {
|
||||
beforeEach(() => {
|
||||
// setup
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// cleanup
|
||||
});
|
||||
|
||||
test('test', () => {
|
||||
// test
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Mocking Fetch
|
||||
```typescript
|
||||
// Before (Jest)
|
||||
global.fetch = jest.fn(() => Promise.resolve(new Response()));
|
||||
|
||||
// After (Bun)
|
||||
const mockFetch = mock(() => Promise.resolve(new Response()));
|
||||
global.fetch = mockFetch as unknown as typeof fetch;
|
||||
```
|
||||
|
||||
### Mocking WebSocket
|
||||
```typescript
|
||||
// Create a MockWebSocket class implementing WebSocket interface
|
||||
class MockWebSocket implements WebSocket {
|
||||
public static readonly CONNECTING = 0;
|
||||
public static readonly OPEN = 1;
|
||||
public static readonly CLOSING = 2;
|
||||
public static readonly CLOSED = 3;
|
||||
|
||||
public readyState: 0 | 1 | 2 | 3 = MockWebSocket.OPEN;
|
||||
public addEventListener = mock(() => undefined);
|
||||
public removeEventListener = mock(() => undefined);
|
||||
public send = mock(() => undefined);
|
||||
public close = mock(() => undefined);
|
||||
// ... implement other required methods
|
||||
}
|
||||
|
||||
// Use it in tests
|
||||
global.WebSocket = MockWebSocket as unknown as typeof WebSocket;
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Test
|
||||
```typescript
|
||||
import { describe, expect, test } from "bun:test";
|
||||
|
||||
describe('formatToolCall', () => {
|
||||
test('should format an object into the correct structure', () => {
|
||||
const testObj = { name: 'test', value: 123 };
|
||||
const result = formatToolCall(testObj);
|
||||
|
||||
expect(result).toEqual({
|
||||
content: [{
|
||||
type: 'text',
|
||||
text: JSON.stringify(testObj, null, 2),
|
||||
isError: false
|
||||
}]
|
||||
});
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Async Test with Mocking
|
||||
```typescript
|
||||
import { describe, expect, test, mock } from "bun:test";
|
||||
|
||||
describe('API Client', () => {
|
||||
test('should fetch data', async () => {
|
||||
const mockResponse = { data: 'test' };
|
||||
const mockFetch = mock(() => Promise.resolve(new Response(
|
||||
JSON.stringify(mockResponse),
|
||||
{ status: 200, headers: new Headers() }
|
||||
)));
|
||||
global.fetch = mockFetch as unknown as typeof fetch;
|
||||
|
||||
const result = await apiClient.getData();
|
||||
expect(result).toEqual(mockResponse);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Complex Mocking Example
|
||||
```typescript
|
||||
import { describe, expect, test, mock } from "bun:test";
|
||||
import type { Mock } from "bun:test";
|
||||
|
||||
interface MockServices {
|
||||
light: {
|
||||
turn_on: Mock<() => Promise<{ success: boolean }>>;
|
||||
turn_off: Mock<() => Promise<{ success: boolean }>>;
|
||||
};
|
||||
}
|
||||
|
||||
const mockServices: MockServices = {
|
||||
light: {
|
||||
turn_on: mock(() => Promise.resolve({ success: true })),
|
||||
turn_off: mock(() => Promise.resolve({ success: true }))
|
||||
}
|
||||
};
|
||||
|
||||
describe('Home Assistant Service', () => {
|
||||
test('should control lights', async () => {
|
||||
const result = await mockServices.light.turn_on();
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Use TypeScript for better type safety in mocks
|
||||
2. Keep mocks as simple as possible
|
||||
3. Prefer interface-based mocks over concrete implementations
|
||||
4. Use proper type assertions when necessary
|
||||
5. Clean up mocks in `afterEach` blocks
|
||||
6. Use descriptive test names
|
||||
7. Group related tests using `describe` blocks
|
||||
|
||||
## Common Issues and Solutions
|
||||
|
||||
### Issue: Type Errors with Mocks
|
||||
```typescript
|
||||
// Solution: Use proper typing with Mock type
|
||||
import type { Mock } from "bun:test";
|
||||
const mockFn: Mock<() => string> = mock(() => "result");
|
||||
```
|
||||
|
||||
### Issue: Global Object Mocking
|
||||
```typescript
|
||||
// Solution: Use type assertions carefully
|
||||
global.someGlobal = mockImplementation as unknown as typeof someGlobal;
|
||||
```
|
||||
|
||||
### Issue: Module Mocking
|
||||
```typescript
|
||||
// Solution: Use Bun's mock.module (recent versions) or dynamic imports
|
||||
const mockModule = {
|
||||
default: mock(() => mockImplementation)
|
||||
};
|
||||
```
|
||||
@@ -1,226 +0,0 @@
|
||||
# Tool Development Guide
|
||||
|
||||
This guide explains how to create new tools for the Home Assistant MCP.
|
||||
|
||||
## Tool Structure
|
||||
|
||||
Each tool should follow this basic structure:
|
||||
|
||||
```typescript
|
||||
interface Tool {
|
||||
id: string;
|
||||
name: string;
|
||||
description: string;
|
||||
version: string;
|
||||
category: ToolCategory;
|
||||
execute(params: any): Promise<ToolResult>;
|
||||
}
|
||||
```
|
||||
|
||||
## Creating a New Tool
|
||||
|
||||
1. Create a new file in the appropriate category directory
|
||||
2. Implement the Tool interface
|
||||
3. Add API endpoints
|
||||
4. Add WebSocket handlers
|
||||
5. Add documentation
|
||||
6. Add tests
|
||||
|
||||
### Example Tool Implementation
|
||||
|
||||
```typescript
|
||||
import { Tool, ToolCategory, ToolResult } from '../interfaces';
|
||||
|
||||
export class MyCustomTool implements Tool {
|
||||
id = 'my_custom_tool';
|
||||
name = 'My Custom Tool';
|
||||
description = 'Description of what the tool does';
|
||||
version = '1.0.0';
|
||||
category = ToolCategory.Utility;
|
||||
|
||||
async execute(params: any): Promise<ToolResult> {
|
||||
// Tool implementation
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
// Tool-specific response data
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Tool Categories
|
||||
|
||||
- Device Management
|
||||
- History & State
|
||||
- Automation
|
||||
- Add-ons & Packages
|
||||
- Notifications
|
||||
- Events
|
||||
- Utility
|
||||
|
||||
## API Integration
|
||||
|
||||
### REST Endpoint
|
||||
|
||||
```typescript
|
||||
import { Router } from 'express';
|
||||
import { MyCustomTool } from './my-custom-tool';
|
||||
|
||||
const router = Router();
|
||||
const tool = new MyCustomTool();
|
||||
|
||||
router.post('/api/tools/custom', async (req, res) => {
|
||||
try {
|
||||
const result = await tool.execute(req.body);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
message: error.message
|
||||
});
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### WebSocket Handler
|
||||
|
||||
```typescript
|
||||
import { WebSocketServer } from 'ws';
|
||||
import { MyCustomTool } from './my-custom-tool';
|
||||
|
||||
const tool = new MyCustomTool();
|
||||
|
||||
wss.on('connection', (ws) => {
|
||||
ws.on('message', async (message) => {
|
||||
const { type, params } = JSON.parse(message);
|
||||
if (type === 'my_custom_tool') {
|
||||
const result = await tool.execute(params);
|
||||
ws.send(JSON.stringify(result));
|
||||
}
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
```typescript
|
||||
class ToolError extends Error {
|
||||
constructor(
|
||||
message: string,
|
||||
public code: string,
|
||||
public status: number = 500
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'ToolError';
|
||||
}
|
||||
}
|
||||
|
||||
// Usage in tool
|
||||
async execute(params: any): Promise<ToolResult> {
|
||||
try {
|
||||
// Tool implementation
|
||||
} catch (error) {
|
||||
throw new ToolError(
|
||||
'Operation failed',
|
||||
'TOOL_ERROR',
|
||||
500
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
```typescript
|
||||
import { MyCustomTool } from './my-custom-tool';
|
||||
|
||||
describe('MyCustomTool', () => {
|
||||
let tool: MyCustomTool;
|
||||
|
||||
beforeEach(() => {
|
||||
tool = new MyCustomTool();
|
||||
});
|
||||
|
||||
it('should execute successfully', async () => {
|
||||
const result = await tool.execute({
|
||||
// Test parameters
|
||||
});
|
||||
expect(result.success).toBe(true);
|
||||
});
|
||||
|
||||
it('should handle errors', async () => {
|
||||
// Error test cases
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
1. Create tool documentation in `docs/tools/category/tool-name.md`
|
||||
2. Update `tools/tools.md` with tool reference
|
||||
3. Add tool to navigation in `mkdocs.yml`
|
||||
|
||||
### Documentation Template
|
||||
|
||||
```markdown
|
||||
# Tool Name
|
||||
|
||||
Description of the tool.
|
||||
|
||||
## Features
|
||||
|
||||
- Feature 1
|
||||
- Feature 2
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
// API endpoints
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// WebSocket usage
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1
|
||||
|
||||
```typescript
|
||||
// Usage example
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
// Response data structure
|
||||
}
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Follow consistent naming conventions
|
||||
2. Implement proper error handling
|
||||
3. Add comprehensive documentation
|
||||
4. Write thorough tests
|
||||
5. Use TypeScript for type safety
|
||||
6. Follow SOLID principles
|
||||
7. Implement rate limiting
|
||||
8. Add proper logging
|
||||
|
||||
## See Also
|
||||
|
||||
- [Interface Documentation](interfaces.md)
|
||||
- [Best Practices](best-practices.md)
|
||||
- [Testing Guide](../testing.md)
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Examples
|
||||
nav_order: 7
|
||||
has_children: true
|
||||
---
|
||||
|
||||
# Example Projects 📚
|
||||
|
||||
This section contains examples and tutorials for common MCP Server integrations.
|
||||
|
||||
## Speech-to-Text Integration
|
||||
|
||||
Example of integrating speech recognition with MCP Server:
|
||||
|
||||
```typescript
|
||||
// From examples/speech-to-text-example.ts
|
||||
// Add example code and explanation
|
||||
```
|
||||
|
||||
## More Examples Coming Soon
|
||||
...
|
||||
@@ -1,212 +0,0 @@
|
||||
# Speech Features
|
||||
|
||||
The Home Assistant MCP Server includes optional speech processing powered by fast-whisper and custom wake word detection. This guide explains how to set up and use these features effectively.
|
||||
|
||||
## Overview
|
||||
|
||||
The speech processing system consists of two main components:
|
||||
1. Wake Word Detection - Listens for specific trigger phrases
|
||||
2. Speech-to-Text - Transcribes spoken commands using fast-whisper
|
||||
|
||||
## Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. Docker environment:
|
||||
```bash
|
||||
docker --version # Should be 20.10.0 or higher
|
||||
```
|
||||
|
||||
2. For GPU acceleration:
|
||||
- NVIDIA GPU with CUDA support
|
||||
- NVIDIA Container Toolkit installed
|
||||
- NVIDIA drivers 450.80.02 or higher
|
||||
|
||||
### Installation
|
||||
|
||||
1. Enable speech features in your `.env`:
|
||||
```bash
|
||||
ENABLE_SPEECH_FEATURES=true
|
||||
ENABLE_WAKE_WORD=true
|
||||
ENABLE_SPEECH_TO_TEXT=true
|
||||
```
|
||||
|
||||
2. Configure model settings:
|
||||
```bash
|
||||
WHISPER_MODEL_PATH=/models
|
||||
WHISPER_MODEL_TYPE=base
|
||||
WHISPER_LANGUAGE=en
|
||||
WHISPER_TASK=transcribe
|
||||
WHISPER_DEVICE=cuda # or cpu
|
||||
```
|
||||
|
||||
3. Start the services:
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Wake Word Detection
|
||||
|
||||
The wake word detector continuously listens for configured trigger phrases. Default wake words:
|
||||
- "hey jarvis"
|
||||
- "ok google"
|
||||
- "alexa"
|
||||
|
||||
Custom wake words can be configured:
|
||||
```bash
|
||||
WAKE_WORDS=computer,jarvis,assistant
|
||||
```
|
||||
|
||||
When a wake word is detected:
|
||||
1. The system starts recording audio
|
||||
2. Audio is processed through the speech-to-text pipeline
|
||||
3. The resulting command is processed by the server
|
||||
|
||||
### Speech-to-Text
|
||||
|
||||
#### Automatic Transcription
|
||||
|
||||
After wake word detection:
|
||||
1. Audio is automatically captured (default: 5 seconds)
|
||||
2. The audio is transcribed using the configured whisper model
|
||||
3. The transcribed text is processed as a command
|
||||
|
||||
#### Manual Transcription
|
||||
|
||||
You can also manually transcribe audio using the API:
|
||||
|
||||
```typescript
|
||||
// Using the TypeScript client
|
||||
import { SpeechService } from '@ha-mcp/client';
|
||||
|
||||
const speech = new SpeechService();
|
||||
|
||||
// Transcribe from audio buffer
|
||||
const buffer = await getAudioBuffer();
|
||||
const text = await speech.transcribe(buffer);
|
||||
|
||||
// Transcribe from file
|
||||
const text = await speech.transcribeFile('command.wav');
|
||||
```
|
||||
|
||||
```javascript
|
||||
// Using the REST API
|
||||
POST /api/speech/transcribe
|
||||
Content-Type: multipart/form-data
|
||||
|
||||
file: <audio file>
|
||||
```
|
||||
|
||||
### Event Handling
|
||||
|
||||
The system emits various events during speech processing:
|
||||
|
||||
```typescript
|
||||
speech.on('wakeWord', (word: string) => {
|
||||
console.log(`Wake word detected: ${word}`);
|
||||
});
|
||||
|
||||
speech.on('listening', () => {
|
||||
console.log('Listening for command...');
|
||||
});
|
||||
|
||||
speech.on('transcribing', () => {
|
||||
console.log('Processing speech...');
|
||||
});
|
||||
|
||||
speech.on('transcribed', (text: string) => {
|
||||
console.log(`Transcribed text: ${text}`);
|
||||
});
|
||||
|
||||
speech.on('error', (error: Error) => {
|
||||
console.error('Speech processing error:', error);
|
||||
});
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Model Selection
|
||||
|
||||
Choose an appropriate model based on your needs:
|
||||
|
||||
1. Resource-constrained environments:
|
||||
- Use `tiny.en` or `base.en`
|
||||
- Run on CPU if GPU unavailable
|
||||
- Limit concurrent processing
|
||||
|
||||
2. High-accuracy requirements:
|
||||
- Use `small.en` or `medium.en`
|
||||
- Enable GPU acceleration
|
||||
- Increase audio quality
|
||||
|
||||
3. Production environments:
|
||||
- Use `base.en` or `small.en`
|
||||
- Enable GPU acceleration
|
||||
- Configure appropriate timeouts
|
||||
|
||||
### GPU Acceleration
|
||||
|
||||
When using GPU acceleration:
|
||||
|
||||
1. Monitor GPU memory usage:
|
||||
```bash
|
||||
nvidia-smi -l 1
|
||||
```
|
||||
|
||||
2. Adjust model size if needed:
|
||||
```bash
|
||||
WHISPER_MODEL_TYPE=small # Decrease if GPU memory limited
|
||||
```
|
||||
|
||||
3. Configure processing device:
|
||||
```bash
|
||||
WHISPER_DEVICE=cuda # Use GPU
|
||||
WHISPER_DEVICE=cpu # Use CPU if GPU unavailable
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. Wake word detection not working:
|
||||
- Check microphone permissions
|
||||
- Adjust `WAKE_WORD_SENSITIVITY`
|
||||
- Verify wake words configuration
|
||||
|
||||
2. Poor transcription quality:
|
||||
- Check audio input quality
|
||||
- Try a larger model
|
||||
- Verify language settings
|
||||
|
||||
3. Performance issues:
|
||||
- Monitor resource usage
|
||||
- Consider smaller model
|
||||
- Check GPU acceleration status
|
||||
|
||||
### Logging
|
||||
|
||||
Enable debug logging for detailed information:
|
||||
```bash
|
||||
LOG_LEVEL=debug
|
||||
```
|
||||
|
||||
Speech-specific logs will be tagged with the `[SPEECH]` prefix.
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. Audio Privacy:
|
||||
- Audio is processed locally
|
||||
- No data sent to external services
|
||||
- Temporary files automatically cleaned
|
||||
|
||||
2. Access Control:
|
||||
- Speech endpoints require authentication
|
||||
- Rate limiting applies to transcription
|
||||
- Configurable command restrictions
|
||||
|
||||
3. Resource Protection:
|
||||
- Timeouts prevent hanging
|
||||
- Memory limits enforced
|
||||
- Graceful error handling
|
||||
@@ -1,5 +0,0 @@
|
||||
# Configuration
|
||||
|
||||
## Basic Configuration
|
||||
|
||||
## Advanced Settings
|
||||
@@ -1,10 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Docker Deployment
|
||||
parent: Getting Started
|
||||
nav_order: 3
|
||||
---
|
||||
|
||||
# Docker Deployment Guide 🐳
|
||||
|
||||
Detailed guide for deploying MCP Server with Docker...
|
||||
@@ -1,8 +0,0 @@
|
||||
# Getting Started
|
||||
|
||||
Welcome to the Advanced Home Assistant MCP getting started guide. Follow these steps to begin:
|
||||
|
||||
1. [Installation](installation.md)
|
||||
2. [Configuration](configuration.md)
|
||||
3. [Docker Setup](docker.md)
|
||||
4. [Quick Start](quickstart.md)
|
||||
@@ -1,181 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Installation
|
||||
parent: Getting Started
|
||||
nav_order: 1
|
||||
---
|
||||
|
||||
# Installation Guide 🛠️
|
||||
|
||||
This guide covers different methods to install and set up the MCP Server for Home Assistant. Choose the installation method that best suits your needs.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before installing MCP Server, ensure you have:
|
||||
|
||||
- Home Assistant instance running and accessible
|
||||
- Node.js 18+ or Docker installed
|
||||
- Home Assistant Long-Lived Access Token ([How to get one](https://developers.home-assistant.io/docs/auth_api/#long-lived-access-token))
|
||||
|
||||
## Installation Methods
|
||||
|
||||
### 1. 🔧 Smithery Installation (Recommended)
|
||||
|
||||
The easiest way to install MCP Server is through Smithery:
|
||||
|
||||
#### Smithery Configuration
|
||||
|
||||
The project includes a `smithery.yaml` configuration:
|
||||
|
||||
```yaml
|
||||
# Add smithery.yaml contents and explanation
|
||||
```
|
||||
|
||||
#### Installation Steps
|
||||
|
||||
```bash
|
||||
npx -y @smithery/cli install @jango-blockchained/advanced-homeassistant-mcp --client claude
|
||||
```
|
||||
|
||||
### 2. 🐳 Docker Installation
|
||||
|
||||
For a containerized deployment:
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone --depth 1 https://github.com/jango-blockchained/advanced-homeassistant-mcp.git
|
||||
cd advanced-homeassistant-mcp
|
||||
|
||||
# Configure environment variables
|
||||
cp .env.example .env
|
||||
# Edit .env with your Home Assistant details:
|
||||
# - HA_URL: Your Home Assistant URL
|
||||
# - HA_TOKEN: Your Long-Lived Access Token
|
||||
# - Other configuration options
|
||||
|
||||
# Build and start containers
|
||||
docker compose up -d --build
|
||||
|
||||
# View logs (optional)
|
||||
docker compose logs -f --tail=50
|
||||
```
|
||||
|
||||
### 3. 💻 Manual Installation
|
||||
|
||||
For direct installation on your system:
|
||||
|
||||
```bash
|
||||
# Install Bun runtime
|
||||
curl -fsSL https://bun.sh/install | bash
|
||||
|
||||
# Clone and install
|
||||
git clone https://github.com/jango-blockchained/advanced-homeassistant-mcp.git
|
||||
cd advanced-homeassistant-mcp
|
||||
bun install --frozen-lockfile
|
||||
|
||||
# Configure environment
|
||||
cp .env.example .env
|
||||
# Edit .env with your configuration
|
||||
|
||||
# Start the server
|
||||
bun run dev --watch
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Key configuration options in your `.env` file:
|
||||
|
||||
```env
|
||||
# Home Assistant Configuration
|
||||
HA_URL=http://your-homeassistant:8123
|
||||
HA_TOKEN=your_long_lived_access_token
|
||||
|
||||
# Server Configuration
|
||||
PORT=3000
|
||||
HOST=0.0.0.0
|
||||
NODE_ENV=production
|
||||
|
||||
# Security Settings
|
||||
JWT_SECRET=your_secure_jwt_secret
|
||||
RATE_LIMIT=100
|
||||
```
|
||||
|
||||
### Client Integration
|
||||
|
||||
#### Cursor Integration
|
||||
|
||||
Add to `.cursor/config/config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"homeassistant-mcp": {
|
||||
"command": "bun",
|
||||
"args": ["run", "start"],
|
||||
"cwd": "${workspaceRoot}",
|
||||
"env": {
|
||||
"NODE_ENV": "development"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Claude Desktop Integration
|
||||
|
||||
Add to your Claude configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"homeassistant-mcp": {
|
||||
"command": "bun",
|
||||
"args": ["run", "start", "--port", "8080"],
|
||||
"env": {
|
||||
"NODE_ENV": "production"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
To verify your installation:
|
||||
|
||||
1. Check server status:
|
||||
```bash
|
||||
curl http://localhost:3000/health
|
||||
```
|
||||
|
||||
2. Test Home Assistant connection:
|
||||
```bash
|
||||
curl http://localhost:3000/api/state
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues:
|
||||
|
||||
1. Check the [Troubleshooting Guide](../troubleshooting.md)
|
||||
2. Verify your environment variables
|
||||
3. Check server logs:
|
||||
```bash
|
||||
# For Docker installation
|
||||
docker compose logs -f
|
||||
|
||||
# For manual installation
|
||||
bun run dev
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Follow the [Quick Start Guide](quickstart.md) to begin using MCP Server
|
||||
- Read the [API Documentation](../api/index.md) for integration details
|
||||
- Check the [Architecture Overview](../architecture.md) to understand the system
|
||||
|
||||
## Support
|
||||
|
||||
Need help? Check our [Support Resources](../index.md#support) or [open an issue](https://github.com/jango-blockchained/advanced-homeassistant-mcp/issues).
|
||||
@@ -1,219 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Quick Start
|
||||
parent: Getting Started
|
||||
nav_order: 2
|
||||
---
|
||||
|
||||
# Quick Start Guide 🚀
|
||||
|
||||
This guide will help you get started with MCP Server after installation. We'll cover basic usage, common commands, and simple integrations.
|
||||
|
||||
## First Steps
|
||||
|
||||
### 1. Verify Connection
|
||||
|
||||
After installation, verify your MCP Server is running and connected to Home Assistant:
|
||||
|
||||
```bash
|
||||
# Check server health
|
||||
curl http://localhost:3000/health
|
||||
|
||||
# Verify Home Assistant connection
|
||||
curl http://localhost:3000/api/state
|
||||
```
|
||||
|
||||
### 2. Basic Voice Commands
|
||||
|
||||
Try these basic voice commands to test your setup:
|
||||
|
||||
```bash
|
||||
# Example using curl for testing
|
||||
curl -X POST http://localhost:3000/api/command \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"command": "Turn on the living room lights"}'
|
||||
```
|
||||
|
||||
Common voice commands:
|
||||
- "Turn on/off [device name]"
|
||||
- "Set [device] to [value]"
|
||||
- "What's the temperature in [room]?"
|
||||
- "Is [device] on or off?"
|
||||
|
||||
## Real-World Examples
|
||||
|
||||
### 1. Smart Lighting Control
|
||||
|
||||
```javascript
|
||||
// Browser example using fetch
|
||||
const response = await fetch('http://localhost:3000/api/command', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
command: 'Set living room lights to 50% brightness and warm white color'
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### 2. Real-Time Updates
|
||||
|
||||
Subscribe to device state changes using Server-Sent Events (SSE):
|
||||
|
||||
```javascript
|
||||
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_TOKEN&domain=light');
|
||||
|
||||
eventSource.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
console.log('Device state changed:', data);
|
||||
// Update your UI here
|
||||
};
|
||||
```
|
||||
|
||||
### 3. Scene Automation
|
||||
|
||||
Create and trigger scenes for different activities:
|
||||
|
||||
```javascript
|
||||
// Create a "Movie Night" scene
|
||||
const createScene = async () => {
|
||||
await fetch('http://localhost:3000/api/scene', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
name: 'Movie Night',
|
||||
actions: [
|
||||
{ device: 'living_room_lights', action: 'dim', value: 20 },
|
||||
{ device: 'tv', action: 'on' },
|
||||
{ device: 'soundbar', action: 'on' }
|
||||
]
|
||||
})
|
||||
});
|
||||
};
|
||||
|
||||
// Trigger the scene with voice command:
|
||||
// "Hey MCP, activate movie night scene"
|
||||
```
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### 1. Web Dashboard Integration
|
||||
|
||||
```javascript
|
||||
// React component example
|
||||
function SmartHomeControl() {
|
||||
const [devices, setDevices] = useState([]);
|
||||
|
||||
useEffect(() => {
|
||||
// Subscribe to device updates
|
||||
const events = new EventSource('http://localhost:3000/subscribe_events');
|
||||
events.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
setDevices(currentDevices =>
|
||||
currentDevices.map(device =>
|
||||
device.id === data.id ? {...device, ...data} : device
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
return () => events.close();
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<div className="dashboard">
|
||||
{devices.map(device => (
|
||||
<DeviceCard key={device.id} device={device} />
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Voice Assistant Integration
|
||||
|
||||
```typescript
|
||||
// Example using speech-to-text with MCP
|
||||
async function handleVoiceCommand(audioBlob: Blob) {
|
||||
// First, convert speech to text
|
||||
const text = await speechToText(audioBlob);
|
||||
|
||||
// Then send command to MCP
|
||||
const response = await fetch('http://localhost:3000/api/command', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ command: text })
|
||||
});
|
||||
|
||||
return response.json();
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Error Handling**
|
||||
```javascript
|
||||
try {
|
||||
const response = await fetch('http://localhost:3000/api/command', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ command: 'Turn on lights' })
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`HTTP error! status: ${response.status}`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
// Handle error appropriately
|
||||
}
|
||||
```
|
||||
|
||||
2. **Connection Management**
|
||||
```javascript
|
||||
class MCPConnection {
|
||||
constructor() {
|
||||
this.eventSource = null;
|
||||
this.reconnectAttempts = 0;
|
||||
}
|
||||
|
||||
connect() {
|
||||
this.eventSource = new EventSource('http://localhost:3000/subscribe_events');
|
||||
this.eventSource.onerror = this.handleError.bind(this);
|
||||
}
|
||||
|
||||
handleError() {
|
||||
if (this.reconnectAttempts < 3) {
|
||||
setTimeout(() => {
|
||||
this.reconnectAttempts++;
|
||||
this.connect();
|
||||
}, 1000 * this.reconnectAttempts);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Explore the [API Documentation](../api/index.md) for advanced features
|
||||
- Learn about [SSE API](../api/sse.md) for real-time updates
|
||||
- Check out [Architecture](../architecture.md) for system design details
|
||||
- Read the [Contributing Guide](../contributing.md) to get involved
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues:
|
||||
- Verify your authentication token
|
||||
- Check server logs for errors
|
||||
- Ensure Home Assistant is accessible
|
||||
- Review the [Troubleshooting Guide](../troubleshooting.md)
|
||||
|
||||
Need more help? Visit our [Support Resources](../index.md#support).
|
||||
@@ -1,57 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Home
|
||||
nav_order: 1
|
||||
---
|
||||
|
||||
# Advanced Home Assistant MCP
|
||||
|
||||
Welcome to the Advanced Home Assistant Master Control Program documentation.
|
||||
|
||||
This documentation provides comprehensive information about setting up, configuring, and using the Advanced Home Assistant MCP system.
|
||||
|
||||
## Quick Links
|
||||
|
||||
- [Getting Started](getting-started/index.md)
|
||||
- [API Reference](api/index.md)
|
||||
- [Configuration Guide](getting-started/configuration.md)
|
||||
- [Docker Setup](getting-started/docker.md)
|
||||
|
||||
## What is MCP Server?
|
||||
|
||||
MCP Server is a bridge between Home Assistant and custom automation tools, enabling basic device control and real-time monitoring of your smart home environment. It provides a flexible interface for managing and interacting with your home automation setup.
|
||||
|
||||
## Key Features
|
||||
|
||||
### 🎮 Device Control
|
||||
- Basic REST API for device management
|
||||
- WebSocket and Server-Sent Events (SSE) for real-time updates
|
||||
- Simple automation rule support
|
||||
|
||||
### 🛡️ Security & Performance
|
||||
- JWT authentication
|
||||
- Basic request validation
|
||||
- Lightweight server design
|
||||
|
||||
## Documentation Structure
|
||||
|
||||
### Getting Started
|
||||
- [Installation Guide](getting-started/installation.md) - Set up MCP Server
|
||||
- [Quick Start Tutorial](getting-started/quickstart.md) - Basic usage examples
|
||||
|
||||
### Core Documentation
|
||||
- [API Documentation](api/index.md) - API reference
|
||||
- [Architecture Overview](architecture.md) - System design
|
||||
- [Contributing Guidelines](contributing.md) - How to contribute
|
||||
- [Troubleshooting Guide](troubleshooting.md) - Common issues
|
||||
|
||||
## Support
|
||||
|
||||
Need help or want to report issues?
|
||||
|
||||
- [GitHub Issues](https://github.com/jango-blockchained/homeassistant-mcp/issues)
|
||||
- [GitHub Discussions](https://github.com/jango-blockchained/homeassistant-mcp/discussions)
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the MIT License. See the [LICENSE](https://github.com/jango-blockchained/homeassistant-mcp/blob/main/LICENSE) file for details.
|
||||
@@ -1,62 +0,0 @@
|
||||
// Dark mode handling
|
||||
document.addEventListener('DOMContentLoaded', function () {
|
||||
// Check for saved dark mode preference
|
||||
const darkMode = localStorage.getItem('darkMode');
|
||||
if (darkMode === 'true') {
|
||||
document.body.classList.add('dark-mode');
|
||||
}
|
||||
});
|
||||
|
||||
// Smooth scrolling for anchor links
|
||||
document.querySelectorAll('a[href^="#"]').forEach(anchor => {
|
||||
anchor.addEventListener('click', function (e) {
|
||||
e.preventDefault();
|
||||
document.querySelector(this.getAttribute('href')).scrollIntoView({
|
||||
behavior: 'smooth'
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// Add copy button to code blocks
|
||||
document.querySelectorAll('pre code').forEach((block) => {
|
||||
const button = document.createElement('button');
|
||||
button.className = 'copy-button';
|
||||
button.textContent = 'Copy';
|
||||
|
||||
button.addEventListener('click', async () => {
|
||||
await navigator.clipboard.writeText(block.textContent);
|
||||
button.textContent = 'Copied!';
|
||||
setTimeout(() => {
|
||||
button.textContent = 'Copy';
|
||||
}, 2000);
|
||||
});
|
||||
|
||||
const pre = block.parentNode;
|
||||
pre.insertBefore(button, block);
|
||||
});
|
||||
|
||||
// Add version selector handling
|
||||
const versionSelector = document.querySelector('.version-selector');
|
||||
if (versionSelector) {
|
||||
versionSelector.addEventListener('change', (e) => {
|
||||
const version = e.target.value;
|
||||
window.location.href = `/${version}/`;
|
||||
});
|
||||
}
|
||||
|
||||
// Add feedback handling
|
||||
document.querySelectorAll('.feedback-button').forEach(button => {
|
||||
button.addEventListener('click', function () {
|
||||
const feedback = this.getAttribute('data-feedback');
|
||||
// Send feedback to analytics
|
||||
if (typeof gtag !== 'undefined') {
|
||||
gtag('event', 'feedback', {
|
||||
'event_category': 'Documentation',
|
||||
'event_label': feedback
|
||||
});
|
||||
}
|
||||
// Show thank you message
|
||||
this.textContent = 'Thank you!';
|
||||
this.disabled = true;
|
||||
});
|
||||
});
|
||||
@@ -1,12 +0,0 @@
|
||||
window.MathJax = {
|
||||
tex: {
|
||||
inlineMath: [["\\(", "\\)"]],
|
||||
displayMath: [["\\[", "\\]"]],
|
||||
processEscapes: true,
|
||||
processEnvironments: true
|
||||
},
|
||||
options: {
|
||||
ignoreHtmlClass: ".*|",
|
||||
processHtmlClass: "arithmatex"
|
||||
}
|
||||
};
|
||||
@@ -1,42 +0,0 @@
|
||||
# Core
|
||||
mkdocs>=1.5.3
|
||||
mkdocs-material>=9.5.3
|
||||
|
||||
# Enhanced Functionality
|
||||
mkdocs-minify-plugin>=0.7.1
|
||||
mkdocs-git-revision-date-localized-plugin>=1.2.1
|
||||
mkdocs-glightbox>=0.3.4
|
||||
mkdocs-git-authors-plugin>=0.7.2
|
||||
mkdocs-git-committers-plugin>=0.2.3
|
||||
mkdocs-static-i18n>=1.2.0
|
||||
mkdocs-awesome-pages-plugin>=2.9.2
|
||||
mkdocs-redirects>=1.2.1
|
||||
mkdocs-include-markdown-plugin>=6.0.4
|
||||
mkdocs-macros-plugin>=1.0.4
|
||||
mkdocs-meta-descriptions-plugin>=3.0.0
|
||||
mkdocs-print-site-plugin>=2.3.6
|
||||
|
||||
# Code Documentation
|
||||
mkdocstrings>=0.24.0
|
||||
mkdocstrings-python>=1.7.5
|
||||
|
||||
# Markdown Extensions
|
||||
pymdown-extensions>=10.5
|
||||
markdown>=3.5.1
|
||||
mdx_truly_sane_lists>=1.3
|
||||
pygments>=2.17.2
|
||||
|
||||
# Math Support
|
||||
python-markdown-math>=0.8
|
||||
|
||||
# Diagrams
|
||||
plantuml-markdown>=3.9.2
|
||||
mkdocs-mermaid2-plugin>=1.1.1
|
||||
|
||||
# Search Enhancements
|
||||
mkdocs-material[imaging]>=9.5.3
|
||||
pillow>=10.2.0
|
||||
cairosvg>=2.7.1
|
||||
|
||||
# Development Tools
|
||||
mike>=2.0.0 # For version management
|
||||
@@ -1,52 +0,0 @@
|
||||
# Roadmap for MCP Server
|
||||
|
||||
The following roadmap outlines our planned enhancements and future directions for the Home Assistant MCP Server. This document is a living guide that will be updated as new features are developed.
|
||||
|
||||
## Near-Term Goals
|
||||
|
||||
- **Core Functionality Improvements:**
|
||||
- Enhance REST API capabilities
|
||||
- Improve WebSocket and SSE reliability
|
||||
- Develop more robust error handling
|
||||
|
||||
- **Security Enhancements:**
|
||||
- Strengthen JWT authentication
|
||||
- Improve input validation
|
||||
- Add basic logging for security events
|
||||
|
||||
- **Performance Optimizations:**
|
||||
- Optimize server response times
|
||||
- Improve resource utilization
|
||||
- Implement basic caching mechanisms
|
||||
|
||||
## Mid-Term Goals
|
||||
|
||||
- **Device Integration:**
|
||||
- Expand support for additional Home Assistant device types
|
||||
- Improve device state synchronization
|
||||
- Develop more flexible automation rule support
|
||||
|
||||
- **Developer Experience:**
|
||||
- Improve documentation
|
||||
- Create more comprehensive examples
|
||||
- Develop basic CLI tools for configuration
|
||||
|
||||
## Long-Term Vision
|
||||
|
||||
- **Extensibility:**
|
||||
- Design a simple plugin system
|
||||
- Create guidelines for community contributions
|
||||
- Establish a clear extension mechanism
|
||||
|
||||
- **Reliability:**
|
||||
- Implement comprehensive testing
|
||||
- Develop monitoring and basic health check features
|
||||
- Improve overall system stability
|
||||
|
||||
## How to Follow the Roadmap
|
||||
|
||||
- **Community Involvement:** We welcome feedback and contributions.
|
||||
- **Transparency:** Check our GitHub repository for ongoing discussions.
|
||||
- **Iterative Development:** Goals may change based on community needs and technical feasibility.
|
||||
|
||||
*This roadmap is intended as a guide and may evolve based on community needs, technological advancements, and strategic priorities.*
|
||||
146
docs/security.md
@@ -1,146 +0,0 @@
|
||||
# Security Guide
|
||||
|
||||
This document outlines security best practices and configurations for the Home Assistant MCP Server.
|
||||
|
||||
## Authentication
|
||||
|
||||
### JWT Authentication
|
||||
The server uses JWT (JSON Web Tokens) for API authentication:
|
||||
|
||||
```http
|
||||
Authorization: Bearer YOUR_JWT_TOKEN
|
||||
```
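
For example, a client attaches the token to each request in that header. A minimal TypeScript sketch (the `/api/state` endpoint and base URL follow the examples elsewhere in these docs, and the environment variable name is only illustrative):

```typescript
// Call an authenticated endpoint with a JWT in the Authorization header.
const token = process.env.MCP_JWT_TOKEN ?? ""; // illustrative variable name

const response = await fetch("http://localhost:3000/api/state", {
  headers: {
    Authorization: `Bearer ${token}`,
  },
});

if (response.status === 401) {
  throw new Error("Token rejected - check JWT_SECRET and token expiry");
}

const state = await response.json();
console.log(state);
```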
|
||||
|
||||
### Token Configuration
|
||||
```yaml
|
||||
security:
|
||||
jwt_secret: YOUR_SECRET_KEY
|
||||
token_expiry: 24h
|
||||
refresh_token_expiry: 7d
|
||||
```
|
||||
|
||||
## Access Control
|
||||
|
||||
### CORS Configuration
|
||||
Configure allowed origins to prevent unauthorized access:
|
||||
|
||||
```yaml
|
||||
security:
|
||||
allowed_origins:
|
||||
- http://localhost:3000
|
||||
- https://your-domain.com
|
||||
```
|
||||
|
||||
### IP Filtering
|
||||
Restrict access by IP address:
|
||||
|
||||
```yaml
|
||||
security:
|
||||
allowed_ips:
|
||||
- 192.168.1.0/24
|
||||
- 10.0.0.0/8
|
||||
```
|
||||
|
||||
## SSL/TLS Configuration
|
||||
|
||||
### Enable HTTPS
|
||||
```yaml
|
||||
ssl:
|
||||
enabled: true
|
||||
cert_file: /path/to/cert.pem
|
||||
key_file: /path/to/key.pem
|
||||
```
|
||||
|
||||
### Certificate Management
|
||||
1. Use Let's Encrypt for free SSL certificates
|
||||
2. Regularly renew certificates
|
||||
3. Monitor certificate expiration
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
### Basic Rate Limiting
|
||||
```yaml
|
||||
rate_limit:
|
||||
enabled: true
|
||||
requests_per_minute: 100
|
||||
burst: 20
|
||||
```
|
||||
|
||||
### Advanced Rate Limiting
|
||||
```yaml
|
||||
rate_limit:
|
||||
rules:
|
||||
- endpoint: /api/control
|
||||
requests_per_minute: 50
|
||||
- endpoint: /api/state
|
||||
requests_per_minute: 200
|
||||
```
|
||||
|
||||
## Data Protection
|
||||
|
||||
### Sensitive Data
|
||||
- Use environment variables for secrets
|
||||
- Encrypt sensitive data at rest
|
||||
- Implement secure backup procedures
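
As a sketch of the first point, secrets can be read once at startup and validated so that a missing value fails fast instead of silently falling back to a default (the variable names and the 32-character minimum are illustrative assumptions):

```typescript
// Load secrets from the environment at startup; never hard-code them in source.
function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
  return value;
}

export const jwtSecret = requireEnv("JWT_SECRET");
export const hassToken = requireEnv("HA_TOKEN");

if (jwtSecret.length < 32) {
  throw new Error("JWT_SECRET should be at least 32 characters long");
}
```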
|
||||
|
||||
### Logging Security
|
||||
- Avoid logging sensitive information
|
||||
- Rotate logs regularly
|
||||
- Protect log file access
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Regular Security Updates
|
||||
- Keep dependencies updated
|
||||
- Monitor security advisories
|
||||
- Apply patches promptly
|
||||
|
||||
2. Password Policies
|
||||
- Enforce strong passwords
|
||||
- Implement password expiration
|
||||
- Use secure password storage
|
||||
|
||||
3. Monitoring
|
||||
- Log security events
|
||||
- Monitor access patterns
|
||||
- Set up alerts for suspicious activity
|
||||
|
||||
4. Network Security
|
||||
- Use VPN for remote access
|
||||
- Implement network segmentation
|
||||
- Configure firewalls properly
|
||||
|
||||
## Security Checklist
|
||||
|
||||
- [ ] Configure SSL/TLS
|
||||
- [ ] Set up JWT authentication
|
||||
- [ ] Configure CORS properly
|
||||
- [ ] Enable rate limiting
|
||||
- [ ] Implement IP filtering
|
||||
- [ ] Secure sensitive data
|
||||
- [ ] Set up monitoring
|
||||
- [ ] Configure backup encryption
|
||||
- [ ] Update security policies
|
||||
|
||||
## Incident Response
|
||||
|
||||
1. Detection
|
||||
- Monitor security logs
|
||||
- Set up intrusion detection
|
||||
- Configure alerts
|
||||
|
||||
2. Response
|
||||
- Document incident details
|
||||
- Isolate affected systems
|
||||
- Investigate root cause
|
||||
|
||||
3. Recovery
|
||||
- Apply security fixes
|
||||
- Restore from backups
|
||||
- Update security measures
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Security Best Practices](https://owasp.org/www-project-top-ten/)
|
||||
- [JWT Security](https://jwt.io/introduction)
|
||||
- [SSL Configuration](https://ssl-config.mozilla.org/)
|
||||
@@ -1,164 +0,0 @@
|
||||
/* Modern Dark Theme Enhancements */
|
||||
[data-md-color-scheme="slate"] {
|
||||
--md-default-bg-color: #1a1b26;
|
||||
--md-default-fg-color: #a9b1d6;
|
||||
--md-default-fg-color--light: #a9b1d6;
|
||||
--md-default-fg-color--lighter: #787c99;
|
||||
--md-default-fg-color--lightest: #4e5173;
|
||||
--md-primary-fg-color: #7aa2f7;
|
||||
--md-primary-fg-color--light: #7dcfff;
|
||||
--md-primary-fg-color--dark: #2ac3de;
|
||||
--md-accent-fg-color: #bb9af7;
|
||||
--md-accent-fg-color--transparent: #bb9af722;
|
||||
--md-accent-bg-color: #1a1b26;
|
||||
--md-accent-bg-color--light: #24283b;
|
||||
}
|
||||
|
||||
/* Code Blocks */
|
||||
.highlight pre {
|
||||
background-color: #24283b !important;
|
||||
border-radius: 6px;
|
||||
padding: 1em;
|
||||
margin: 1em 0;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
.highlight code {
|
||||
font-family: 'Roboto Mono', monospace;
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
/* Copy Button */
|
||||
.copy-button {
|
||||
position: absolute;
|
||||
right: 0.5em;
|
||||
top: 0.5em;
|
||||
padding: 0.4em 0.8em;
|
||||
background-color: var(--md-accent-bg-color--light);
|
||||
border: 1px solid var(--md-accent-fg-color--transparent);
|
||||
border-radius: 4px;
|
||||
color: var(--md-default-fg-color);
|
||||
font-size: 0.8em;
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.copy-button:hover {
|
||||
background-color: var(--md-accent-fg-color--transparent);
|
||||
border-color: var(--md-accent-fg-color);
|
||||
}
|
||||
|
||||
/* Navigation Enhancements */
|
||||
.md-nav {
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
|
||||
.md-nav__link {
|
||||
padding: 0.4rem 0;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
.md-nav__link:hover {
|
||||
color: var(--md-primary-fg-color) !important;
|
||||
}
|
||||
|
||||
/* Tabs */
|
||||
.md-tabs__link {
|
||||
opacity: 0.8;
|
||||
transition: opacity 0.2s ease;
|
||||
}
|
||||
|
||||
.md-tabs__link:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.md-tabs__link--active {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* Admonitions */
|
||||
.md-typeset .admonition,
|
||||
.md-typeset details {
|
||||
border-width: 0;
|
||||
border-left-width: 4px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* Tables */
|
||||
.md-typeset table:not([class]) {
|
||||
border-radius: 4px;
|
||||
box-shadow: 0 2px 4px var(--md-accent-fg-color--transparent);
|
||||
}
|
||||
|
||||
.md-typeset table:not([class]) th {
|
||||
background-color: var(--md-accent-bg-color--light);
|
||||
border-bottom: 2px solid var(--md-accent-fg-color--transparent);
|
||||
}
|
||||
|
||||
/* Search */
|
||||
.md-search__form {
|
||||
background-color: var(--md-accent-bg-color--light);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* Feedback Buttons */
|
||||
.feedback-button {
|
||||
padding: 0.5em 1em;
|
||||
margin: 0 0.5em;
|
||||
border-radius: 4px;
|
||||
background-color: var(--md-accent-bg-color--light);
|
||||
border: 1px solid var(--md-accent-fg-color--transparent);
|
||||
color: var(--md-default-fg-color);
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.feedback-button:hover {
|
||||
background-color: var(--md-accent-fg-color--transparent);
|
||||
border-color: var(--md-accent-fg-color);
|
||||
}
|
||||
|
||||
.feedback-button:disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
}
|
||||
|
||||
/* Version Selector */
|
||||
.version-selector {
|
||||
padding: 0.5em;
|
||||
border-radius: 4px;
|
||||
background-color: var(--md-accent-bg-color--light);
|
||||
border: 1px solid var(--md-accent-fg-color--transparent);
|
||||
color: var(--md-default-fg-color);
|
||||
}
|
||||
|
||||
/* Scrollbar */
|
||||
::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: var(--md-accent-bg-color--light);
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: var(--md-accent-fg-color--transparent);
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: var(--md-accent-fg-color);
|
||||
}
|
||||
|
||||
/* Print Styles */
|
||||
@media print {
|
||||
.md-typeset a {
|
||||
color: var(--md-default-fg-color) !important;
|
||||
}
|
||||
|
||||
.md-content__inner {
|
||||
margin: 0;
|
||||
padding: 1rem;
|
||||
}
|
||||
}
|
||||
422
docs/testing.md
@@ -1,422 +0,0 @@
|
||||
# Testing Documentation
|
||||
|
||||
## Quick Reference
|
||||
|
||||
```bash
|
||||
# Most Common Commands
|
||||
bun test # Run all tests
|
||||
bun test --watch # Run tests in watch mode
|
||||
bun test --coverage # Run tests with coverage
|
||||
bun test path/to/test.ts # Run a specific test file
|
||||
|
||||
# Additional Options
|
||||
DEBUG=true bun test # Run with debug output
|
||||
bun test --pattern "auth" # Run tests matching a pattern
|
||||
bun test --timeout 60000 # Run with a custom timeout
|
||||
```
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes the testing setup and practices used in the Home Assistant MCP project. We use Bun's test runner for both unit and integration testing, ensuring comprehensive coverage across modules.
|
||||
|
||||
## Test Structure
|
||||
|
||||
Tests are organized in two main locations:
|
||||
|
||||
1. **Root Level Integration Tests** (`/__tests__/`):
|
||||
|
||||
```
|
||||
__tests__/
|
||||
├── ai/ # AI/ML component tests
|
||||
├── api/ # API integration tests
|
||||
├── context/ # Context management tests
|
||||
├── hass/ # Home Assistant integration tests
|
||||
├── schemas/ # Schema validation tests
|
||||
├── security/ # Security integration tests
|
||||
├── tools/ # Tools and utilities tests
|
||||
├── websocket/ # WebSocket integration tests
|
||||
├── helpers.test.ts # Helper function tests
|
||||
├── index.test.ts # Main application tests
|
||||
└── server.test.ts # Server integration tests
|
||||
```
|
||||
|
||||
2. **Component Level Unit Tests** (`src/**/`):
|
||||
|
||||
```
|
||||
src/
|
||||
├── __tests__/ # Global test setup and utilities
|
||||
│ └── setup.ts # Global test configuration
|
||||
├── component/
|
||||
│ ├── __tests__/ # Component-specific unit tests
|
||||
│ └── component.ts
|
||||
```
|
||||
|
||||
## Test Configuration
|
||||
|
||||
### Bun Test Configuration (`bunfig.toml`)
|
||||
|
||||
```toml
|
||||
[test]
|
||||
preload = ["./src/__tests__/setup.ts"] # Global test setup
|
||||
coverage = true # Enable coverage by default
|
||||
timeout = 30000 # Test timeout in milliseconds
|
||||
testMatch = ["**/__tests__/**/*.test.ts"] # Test file patterns
|
||||
```
|
||||
|
||||
### Bun Scripts
|
||||
|
||||
Available test commands in `package.json`:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
bun test
|
||||
|
||||
# Watch mode for development
|
||||
bun test --watch
|
||||
|
||||
# Generate coverage report
|
||||
bun test --coverage
|
||||
|
||||
# Run linting
|
||||
bun run lint
|
||||
|
||||
# Format code
|
||||
bun run format
|
||||
```
|
||||
|
||||
## Test Setup
|
||||
|
||||
### Global Configuration
|
||||
|
||||
A global test setup file (`src/__tests__/setup.ts`) provides:
|
||||
- Environment configuration
|
||||
- Mock utilities
|
||||
- Test helper functions
|
||||
- Global lifecycle hooks
|
||||
|
||||
### Test Environment
|
||||
|
||||
- Environment variables are loaded from `.env.test`.
|
||||
- Console output is minimized unless `DEBUG=true`.
|
||||
- JWT secrets and tokens are preconfigured for testing.
|
||||
- Rate limiting and security features are initialized appropriately.
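
A minimal sketch of what such a setup file can look like (the real contents of `src/__tests__/setup.ts` are not reproduced here, so treat the values below as placeholders):

```typescript
// src/__tests__/setup.ts - illustrative global test setup
import { afterAll, beforeAll } from "bun:test";

beforeAll(() => {
  // Environment for the whole test run
  process.env.NODE_ENV = "test";
  process.env.JWT_SECRET ??= "test_jwt_secret_that_is_at_least_32_chars_long";

  // Keep console output quiet unless DEBUG=true
  if (process.env.DEBUG !== "true") {
    console.debug = () => { };
    console.info = () => { };
  }
});

afterAll(() => {
  // Release any shared resources (sockets, timers, mocks) opened for the run
});
```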
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Basic test run
|
||||
bun test
|
||||
|
||||
# Run tests with coverage
|
||||
bun test --coverage
|
||||
|
||||
# Run a specific test file
|
||||
bun test path/to/test.test.ts
|
||||
|
||||
# Run tests in watch mode
|
||||
bun test --watch
|
||||
|
||||
# Run tests with debug output
|
||||
DEBUG=true bun test
|
||||
|
||||
# Run tests with increased timeout
|
||||
bun test --timeout 60000
|
||||
|
||||
# Run tests matching a pattern
|
||||
bun test --pattern "auth"
|
||||
```
|
||||
|
||||
## Advanced Debugging
|
||||
|
||||
### Using Node Inspector
|
||||
|
||||
```bash
|
||||
# Start tests with inspector
|
||||
bun test --inspect
|
||||
|
||||
# Start tests with inspector and break on first line
|
||||
bun test --inspect-brk
|
||||
```
|
||||
|
||||
### Using VS Code
|
||||
|
||||
Create a launch configuration in `.vscode/launch.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "bun",
|
||||
"request": "launch",
|
||||
"name": "Debug Tests",
|
||||
"program": "${workspaceFolder}/node_modules/bun/bin/bun",
|
||||
"args": ["test", "${file}"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"env": { "DEBUG": "true" }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Test Isolation
|
||||
|
||||
To run a single test in isolation:
|
||||
|
||||
```typescript
|
||||
describe.only("specific test suite", () => {
|
||||
it.only("specific test case", () => {
|
||||
// Only this test will run
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Writing Tests
|
||||
|
||||
### Test File Naming
|
||||
|
||||
- Place test files in a `__tests__` directory adjacent to the code being tested.
|
||||
- Name files with the pattern `*.test.ts`.
|
||||
- Mirror the structure of the source code in your test organization.
|
||||
|
||||
### Example Test Structure
|
||||
|
||||
```typescript
|
||||
describe("Security Features", () => {
|
||||
it("should validate tokens correctly", () => {
|
||||
const payload = { userId: "123", role: "user" };
|
||||
const token = jwt.sign(payload, validSecret, { expiresIn: "1h" });
|
||||
const result = TokenManager.validateToken(token, testIp);
|
||||
expect(result.valid).toBe(true);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Coverage
|
||||
|
||||
The project maintains strict coverage:
|
||||
- Overall coverage: at least 80%
|
||||
- Critical paths: 90%+
|
||||
- New features: ≥85% coverage
|
||||
|
||||
Generate a coverage report with:
|
||||
|
||||
```bash
|
||||
bun test --coverage
|
||||
```
|
||||
|
||||
## Security Middleware Testing
|
||||
|
||||
### Utility Function Testing
|
||||
|
||||
The security middleware uses a utility-first approach, which allows for more granular and comprehensive testing: each security function can be tested independently, improving code reliability and maintainability.
|
||||
|
||||
#### Key Utility Functions
|
||||
|
||||
1. **Rate Limiting (`checkRateLimit`)**
|
||||
- Tests multiple scenarios:
|
||||
- Requests under threshold
|
||||
- Requests exceeding threshold
|
||||
- Rate limit reset after window expiration
|
||||
|
||||
```typescript
|
||||
// Example test
|
||||
it('should throw when requests exceed threshold', () => {
|
||||
const ip = '127.0.0.2';
|
||||
for (let i = 0; i < 11; i++) {
|
||||
if (i < 10) {
|
||||
expect(() => checkRateLimit(ip, 10)).not.toThrow();
|
||||
} else {
|
||||
expect(() => checkRateLimit(ip, 10)).toThrow('Too many requests from this IP');
|
||||
}
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
2. **Request Validation (`validateRequestHeaders`)**
|
||||
- Tests content type validation
|
||||
- Checks request size limits
|
||||
- Validates authorization headers
|
||||
|
||||
```typescript
|
||||
it('should reject invalid content type', () => {
|
||||
const mockRequest = new Request('http://localhost', {
|
||||
method: 'POST',
|
||||
headers: { 'content-type': 'text/plain' }
|
||||
});
|
||||
expect(() => validateRequestHeaders(mockRequest)).toThrow('Content-Type must be application/json');
|
||||
});
|
||||
```
|
||||
|
||||
3. **Input Sanitization (`sanitizeValue`)**
|
||||
- Sanitizes HTML tags
|
||||
- Handles nested objects
|
||||
- Preserves non-string values
|
||||
|
||||
```typescript
|
||||
it('should sanitize HTML tags', () => {
|
||||
const input = '<script>alert("xss")</script>Hello';
|
||||
const sanitized = sanitizeValue(input);
|
||||
expect(sanitized).toBe('&lt;script&gt;alert(&quot;xss&quot;)&lt;/script&gt;Hello');
|
||||
});
|
||||
```
|
||||
|
||||
4. **Security Headers (`applySecurityHeaders`)**
|
||||
- Verifies correct security header application
|
||||
- Checks CSP, frame options, and other security headers
|
||||
|
||||
```typescript
|
||||
it('should apply security headers', () => {
|
||||
const mockRequest = new Request('http://localhost');
|
||||
const headers = applySecurityHeaders(mockRequest);
|
||||
expect(headers['content-security-policy']).toBeDefined();
|
||||
expect(headers['x-frame-options']).toBeDefined();
|
||||
});
|
||||
```
|
||||
|
||||
5. **Error Handling (`handleError`)**
|
||||
- Tests error responses in production and development modes
|
||||
- Verifies error message and stack trace inclusion
|
||||
|
||||
```typescript
|
||||
it('should include error details in development mode', () => {
|
||||
const error = new Error('Test error');
|
||||
const result = handleError(error, 'development');
|
||||
expect(result).toEqual({
|
||||
error: true,
|
||||
message: 'Test error',
|
||||
stack: expect.any(String)
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Testing Philosophy
|
||||
|
||||
- **Isolation**: Each utility function is tested independently
|
||||
- **Comprehensive Coverage**: Multiple scenarios for each function
|
||||
- **Predictable Behavior**: Clear expectations for input and output
|
||||
- **Error Handling**: Robust testing of error conditions
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. Use minimal, focused test cases
|
||||
2. Test both successful and failure scenarios
|
||||
3. Verify input sanitization and security measures
|
||||
4. Mock external dependencies when necessary
|
||||
|
||||
### Running Security Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
bun test
|
||||
|
||||
# Run specific security tests
|
||||
bun test __tests__/security/
|
||||
```
|
||||
|
||||
### Continuous Improvement
|
||||
|
||||
- Regularly update test cases
|
||||
- Add new test scenarios as security requirements evolve
|
||||
- Perform periodic security audits
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Isolation**: Each test should be independent and not rely on the state of other tests.
|
||||
2. **Mocking**: Use the provided mock utilities for external dependencies.
|
||||
3. **Cleanup**: Clean up any resources or state modifications in `afterEach` or `afterAll` hooks.
|
||||
4. **Descriptive Names**: Use clear, descriptive test names that explain the expected behavior.
|
||||
5. **Assertions**: Make specific, meaningful assertions rather than general ones.
|
||||
6. **Setup**: Use `beforeEach` for common test setup to avoid repetition.
|
||||
7. **Error Cases**: Test both success and error cases for complete coverage.
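
As a small illustration of points 1, 3, and 6, shared state can be created in `beforeEach` and torn down in `afterEach` so every test starts clean (the cache fixture below is made up for the example):

```typescript
import { afterEach, beforeEach, describe, expect, it } from "bun:test";

describe("device state cache", () => {
  // Hypothetical fixture used only for this example.
  let cache: Map<string, string>;

  beforeEach(() => {
    // Fresh state for every test keeps tests independent.
    cache = new Map([["light.bedroom", "off"]]);
  });

  afterEach(() => {
    // Clean up anything the test created.
    cache.clear();
  });

  it("stores a new device state", () => {
    cache.set("light.kitchen", "on");
    expect(cache.get("light.kitchen")).toBe("on");
  });
});
```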
|
||||
|
||||
## Coverage
|
||||
|
||||
The project aims for high test coverage, particularly focusing on:
|
||||
- Security-critical code paths
|
||||
- API endpoints
|
||||
- Data validation
|
||||
- Error handling
|
||||
- Event broadcasting
|
||||
|
||||
Run coverage reports using:
|
||||
```bash
|
||||
bun test --coverage
|
||||
```
|
||||
|
||||
## Debugging Tests
|
||||
|
||||
To debug tests:
|
||||
1. Set `DEBUG=true` to enable console output during tests
|
||||
2. Use the `--watch` flag for development
|
||||
3. Add `console.log()` statements (they're only shown when DEBUG is true)
|
||||
4. Use the test utilities' debugging helpers
|
||||
|
||||
### Advanced Debugging
|
||||
|
||||
1. **Using Node Inspector**:
|
||||
```bash
|
||||
# Start tests with inspector
|
||||
bun test --inspect
|
||||
|
||||
# Start tests with inspector and break on first line
|
||||
bun test --inspect-brk
|
||||
```
|
||||
|
||||
2. **Using VS Code**:
|
||||
```jsonc
|
||||
// .vscode/launch.json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "bun",
|
||||
"request": "launch",
|
||||
"name": "Debug Tests",
|
||||
"program": "${workspaceFolder}/node_modules/bun/bin/bun",
|
||||
"args": ["test", "${file}"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"env": { "DEBUG": "true" }
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
3. **Test Isolation**:
|
||||
To run a single test in isolation:
|
||||
```typescript
|
||||
describe.only("specific test suite", () => {
|
||||
it.only("specific test case", () => {
|
||||
// Only this test will run
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
When contributing new code:
|
||||
1. Add tests for new features
|
||||
2. Ensure existing tests pass
|
||||
3. Maintain or improve coverage
|
||||
4. Follow the existing test patterns and naming conventions
|
||||
5. Document any new test utilities or patterns
|
||||
|
||||
## Coverage Requirements
|
||||
|
||||
The project maintains strict coverage requirements:
|
||||
|
||||
- Minimum overall coverage: 80%
|
||||
- Critical paths (security, API, data validation): 90%
|
||||
- New features must include tests with >= 85% coverage
|
||||
|
||||
Coverage reports are generated in multiple formats:
|
||||
- Console summary
|
||||
- HTML report (./coverage/index.html)
|
||||
- LCOV report (./coverage/lcov.info)
|
||||
|
||||
To view detailed coverage:
|
||||
```bash
|
||||
# Generate and open coverage report
|
||||
bun test --coverage && open coverage/index.html
|
||||
```
|
||||
@@ -1,240 +0,0 @@
|
||||
# Add-on Management Tool
|
||||
|
||||
The Add-on Management tool provides functionality to manage Home Assistant add-ons through the MCP interface.
|
||||
|
||||
## Features
|
||||
|
||||
- List available add-ons
|
||||
- Install/uninstall add-ons
|
||||
- Start/stop/restart add-ons
|
||||
- Get add-on information
|
||||
- Update add-ons
|
||||
- Configure add-ons
|
||||
- View add-on logs
|
||||
- Monitor add-on status
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/addons
|
||||
GET /api/addons/{addon_slug}
|
||||
POST /api/addons/{addon_slug}/install
|
||||
POST /api/addons/{addon_slug}/uninstall
|
||||
POST /api/addons/{addon_slug}/start
|
||||
POST /api/addons/{addon_slug}/stop
|
||||
POST /api/addons/{addon_slug}/restart
|
||||
GET /api/addons/{addon_slug}/logs
|
||||
PUT /api/addons/{addon_slug}/config
|
||||
GET /api/addons/{addon_slug}/stats
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// List add-ons
|
||||
{
|
||||
"type": "get_addons"
|
||||
}
|
||||
|
||||
// Get add-on info
|
||||
{
|
||||
"type": "get_addon_info",
|
||||
"addon_slug": "required_addon_slug"
|
||||
}
|
||||
|
||||
// Install add-on
|
||||
{
|
||||
"type": "install_addon",
|
||||
"addon_slug": "required_addon_slug",
|
||||
"version": "optional_version"
|
||||
}
|
||||
|
||||
// Control add-on
|
||||
{
|
||||
"type": "control_addon",
|
||||
"addon_slug": "required_addon_slug",
|
||||
"action": "start|stop|restart"
|
||||
}
|
||||
```
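
These messages are plain JSON frames sent over the server's WebSocket connection. A minimal client sketch (the `ws://localhost:3000` address and `token` query parameter are assumptions - use whatever endpoint and authentication your deployment exposes):

```typescript
// Illustrative WebSocket client for the add-on commands above.
const socket = new WebSocket("ws://localhost:3000?token=YOUR_TOKEN");

socket.addEventListener("open", () => {
  // Request the list of add-ons once the connection is ready.
  socket.send(JSON.stringify({ type: "get_addons" }));
});

socket.addEventListener("message", (event) => {
  const response = JSON.parse(event.data);
  console.log("Add-on response:", response);
});
```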
|
||||
|
||||
## Examples
|
||||
|
||||
### List All Add-ons
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/addons', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const addons = await response.json();
|
||||
```
|
||||
|
||||
### Install Add-on
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/addons/mosquitto/install', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"version": "latest"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Configure Add-on
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/addons/mosquitto/config', {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"logins": [
|
||||
{
|
||||
"username": "mqtt_user",
|
||||
"password": "mqtt_password"
|
||||
}
|
||||
],
|
||||
"customize": {
|
||||
"active": true,
|
||||
"folder": "mosquitto"
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Add-on List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"addons": [
|
||||
{
|
||||
"slug": "addon_slug",
|
||||
"name": "Add-on Name",
|
||||
"version": "1.0.0",
|
||||
"state": "started",
|
||||
"repository": "core",
|
||||
"installed": true,
|
||||
"update_available": false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Add-on Info Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"addon": {
|
||||
"slug": "addon_slug",
|
||||
"name": "Add-on Name",
|
||||
"version": "1.0.0",
|
||||
"description": "Add-on description",
|
||||
"long_description": "Detailed description",
|
||||
"repository": "core",
|
||||
"installed": true,
|
||||
"state": "started",
|
||||
"webui": "http://[HOST]:[PORT:80]",
|
||||
"boot": "auto",
|
||||
"options": {
|
||||
// Add-on specific options
|
||||
},
|
||||
"schema": {
|
||||
// Add-on options schema
|
||||
},
|
||||
"ports": {
|
||||
"80/tcp": 8080
|
||||
},
|
||||
"ingress": true,
|
||||
"ingress_port": 8099
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Add-on Stats Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"stats": {
|
||||
"cpu_percent": 2.5,
|
||||
"memory_usage": 128974848,
|
||||
"memory_limit": 536870912,
|
||||
"network_rx": 1234,
|
||||
"network_tx": 5678,
|
||||
"blk_read": 12345,
|
||||
"blk_write": 67890
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Add-on not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request
|
||||
- `409`: Add-on operation failed
|
||||
- `422`: Invalid configuration
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 50 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `ADDON_RATE_LIMIT`
|
||||
- `ADDON_RATE_WINDOW`
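
Clients should back off when they hit this limit rather than retrying immediately. One possible helper is sketched below (the `Retry-After` header and the one-second fallback are assumptions, not guaranteed behaviour of this server):

```typescript
// Retry a request when the server answers 429 Too Many Requests.
async function fetchWithRetry(url: string, init?: RequestInit, retries = 3): Promise<Response> {
  for (let attempt = 0; attempt <= retries; attempt++) {
    const response = await fetch(url, init);
    if (response.status !== 429 || attempt === retries) {
      return response;
    }
    // Honour Retry-After when present, otherwise wait a little longer each attempt.
    const waitSeconds = Number(response.headers.get("retry-after")) || attempt + 1;
    await new Promise((resolve) => setTimeout(resolve, waitSeconds * 1000));
  }
  throw new Error("unreachable");
}
```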
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Always check add-on compatibility
|
||||
2. Back up configurations before updates
|
||||
3. Monitor resource usage
|
||||
4. Use appropriate update strategies
|
||||
5. Implement proper error handling
|
||||
6. Test configurations in a safe environment
|
||||
7. Handle rate limiting gracefully
|
||||
8. Keep add-ons updated
|
||||
|
||||
## Add-on Security
|
||||
|
||||
- Use secure passwords
|
||||
- Regularly update add-ons
|
||||
- Monitor add-on logs
|
||||
- Restrict network access
|
||||
- Use SSL/TLS when available
|
||||
- Follow principle of least privilege
|
||||
|
||||
## See Also
|
||||
|
||||
- [Package Management](package.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
@@ -1,236 +0,0 @@
|
||||
# Package Management Tool
|
||||
|
||||
The Package Management tool provides functionality to manage Home Assistant Community Store (HACS) packages through the MCP interface.
|
||||
|
||||
## Features
|
||||
|
||||
- List available packages
|
||||
- Install/update/remove packages
|
||||
- Search packages
|
||||
- Get package information
|
||||
- Manage package repositories
|
||||
- Track package updates
|
||||
- View package documentation
|
||||
- Monitor package status
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/packages
|
||||
GET /api/packages/{package_id}
|
||||
POST /api/packages/{package_id}/install
|
||||
POST /api/packages/{package_id}/uninstall
|
||||
POST /api/packages/{package_id}/update
|
||||
GET /api/packages/search
|
||||
GET /api/packages/categories
|
||||
GET /api/packages/repositories
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// List packages
|
||||
{
|
||||
"type": "get_packages",
|
||||
"category": "optional_category"
|
||||
}
|
||||
|
||||
// Search packages
|
||||
{
|
||||
"type": "search_packages",
|
||||
"query": "search_query",
|
||||
"category": "optional_category"
|
||||
}
|
||||
|
||||
// Install package
|
||||
{
|
||||
"type": "install_package",
|
||||
"package_id": "required_package_id",
|
||||
"version": "optional_version"
|
||||
}
|
||||
```
|
||||
|
||||
## Package Categories
|
||||
|
||||
- Integrations
|
||||
- Frontend
|
||||
- Themes
|
||||
- AppDaemon Apps
|
||||
- NetDaemon Apps
|
||||
- Python Scripts
|
||||
- Plugins
|
||||
|
||||
## Examples
|
||||
|
||||
### List All Packages
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/packages', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const packages = await response.json();
|
||||
```
|
||||
|
||||
### Search Packages
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/packages/search?q=weather&category=integrations', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const searchResults = await response.json();
|
||||
```
|
||||
|
||||
### Install Package
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/packages/custom-weather-card/install', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"version": "latest"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Package List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"packages": [
|
||||
{
|
||||
"id": "package_id",
|
||||
"name": "Package Name",
|
||||
"category": "integrations",
|
||||
"description": "Package description",
|
||||
"version": "1.0.0",
|
||||
"installed": true,
|
||||
"update_available": false,
|
||||
"stars": 150,
|
||||
"downloads": 10000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Package Info Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"package": {
|
||||
"id": "package_id",
|
||||
"name": "Package Name",
|
||||
"category": "integrations",
|
||||
"description": "Package description",
|
||||
"long_description": "Detailed description",
|
||||
"version": "1.0.0",
|
||||
"installed_version": "0.9.0",
|
||||
"available_version": "1.0.0",
|
||||
"installed": true,
|
||||
"update_available": true,
|
||||
"stars": 150,
|
||||
"downloads": 10000,
|
||||
"repository": "https://github.com/author/repo",
|
||||
"author": {
|
||||
"name": "Author Name",
|
||||
"url": "https://github.com/author"
|
||||
},
|
||||
"documentation": "https://github.com/author/repo/wiki",
|
||||
"dependencies": [
|
||||
"dependency1",
|
||||
"dependency2"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Search Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"results": [
|
||||
{
|
||||
"id": "package_id",
|
||||
"name": "Package Name",
|
||||
"category": "integrations",
|
||||
"description": "Package description",
|
||||
"version": "1.0.0",
|
||||
"score": 0.95
|
||||
}
|
||||
],
|
||||
"total": 42
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Package not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request
|
||||
- `409`: Package operation failed
|
||||
- `422`: Invalid configuration
|
||||
- `424`: Dependency error
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 50 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `PACKAGE_RATE_LIMIT`
|
||||
- `PACKAGE_RATE_WINDOW`
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Check package compatibility
|
||||
2. Review package documentation
|
||||
3. Verify package dependencies
|
||||
4. Back up before updates
|
||||
5. Test in a safe environment
|
||||
6. Monitor resource usage
|
||||
7. Keep packages updated
|
||||
8. Handle rate limiting gracefully
|
||||
|
||||
## Package Security
|
||||
|
||||
- Verify package sources
|
||||
- Review package permissions
|
||||
- Check package reputation
|
||||
- Monitor package activity
|
||||
- Keep dependencies updated
|
||||
- Follow security advisories
|
||||
|
||||
## See Also
|
||||
|
||||
- [Add-on Management](addon.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
@@ -1,321 +0,0 @@
|
||||
# Automation Configuration Tool
|
||||
|
||||
The Automation Configuration tool provides functionality to create, update, and manage Home Assistant automation configurations.
|
||||
|
||||
## Features
|
||||
|
||||
- Create new automations
|
||||
- Update existing automations
|
||||
- Delete automations
|
||||
- Duplicate automations
|
||||
- Import/Export automation configurations
|
||||
- Validate automation configurations
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
POST /api/automations
|
||||
PUT /api/automations/{automation_id}
|
||||
DELETE /api/automations/{automation_id}
|
||||
POST /api/automations/{automation_id}/duplicate
|
||||
POST /api/automations/validate
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// Create automation
|
||||
{
|
||||
"type": "create_automation",
|
||||
"automation": {
|
||||
// Automation configuration
|
||||
}
|
||||
}
|
||||
|
||||
// Update automation
|
||||
{
|
||||
"type": "update_automation",
|
||||
"automation_id": "required_automation_id",
|
||||
"automation": {
|
||||
// Updated configuration
|
||||
}
|
||||
}
|
||||
|
||||
// Delete automation
|
||||
{
|
||||
"type": "delete_automation",
|
||||
"automation_id": "required_automation_id"
|
||||
}
|
||||
```
|
||||
|
||||
## Automation Configuration
|
||||
|
||||
### Basic Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "morning_routine",
|
||||
"alias": "Morning Routine",
|
||||
"description": "Turn on lights and adjust temperature in the morning",
|
||||
"trigger": [
|
||||
{
|
||||
"platform": "time",
|
||||
"at": "07:00:00"
|
||||
}
|
||||
],
|
||||
"condition": [
|
||||
{
|
||||
"condition": "time",
|
||||
"weekday": ["mon", "tue", "wed", "thu", "fri"]
|
||||
}
|
||||
],
|
||||
"action": [
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.bedroom"
|
||||
},
|
||||
"data": {
|
||||
"brightness": 255,
|
||||
"transition": 300
|
||||
}
|
||||
}
|
||||
],
|
||||
"mode": "single"
|
||||
}
|
||||
```
|
||||
|
||||
### Trigger Types
|
||||
|
||||
```json
|
||||
// Time-based trigger
|
||||
{
|
||||
"platform": "time",
|
||||
"at": "07:00:00"
|
||||
}
|
||||
|
||||
// State-based trigger
|
||||
{
|
||||
"platform": "state",
|
||||
"entity_id": "binary_sensor.motion",
|
||||
"to": "on"
|
||||
}
|
||||
|
||||
// Event-based trigger
|
||||
{
|
||||
"platform": "event",
|
||||
"event_type": "custom_event"
|
||||
}
|
||||
|
||||
// Numeric state trigger
|
||||
{
|
||||
"platform": "numeric_state",
|
||||
"entity_id": "sensor.temperature",
|
||||
"above": 25
|
||||
}
|
||||
```
|
||||
|
||||
### Condition Types
|
||||
|
||||
```json
|
||||
// Time condition
|
||||
{
|
||||
"condition": "time",
|
||||
"after": "07:00:00",
|
||||
"before": "22:00:00"
|
||||
}
|
||||
|
||||
// State condition
|
||||
{
|
||||
"condition": "state",
|
||||
"entity_id": "device_tracker.phone",
|
||||
"state": "home"
|
||||
}
|
||||
|
||||
// Numeric state condition
|
||||
{
|
||||
"condition": "numeric_state",
|
||||
"entity_id": "sensor.temperature",
|
||||
"below": 25
|
||||
}
|
||||
```
|
||||
|
||||
### Action Types
|
||||
|
||||
```json
|
||||
// Service call action
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.bedroom"
|
||||
}
|
||||
}
|
||||
|
||||
// Delay action
|
||||
{
|
||||
"delay": "00:00:30"
|
||||
}
|
||||
|
||||
// Scene activation
|
||||
{
|
||||
"scene": "scene.evening_mode"
|
||||
}
|
||||
|
||||
// Conditional action
|
||||
{
|
||||
"choose": [
|
||||
{
|
||||
"conditions": [
|
||||
{
|
||||
"condition": "state",
|
||||
"entity_id": "sun.sun",
|
||||
"state": "below_horizon"
|
||||
}
|
||||
],
|
||||
"sequence": [
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.living_room"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Create New Automation
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/automations', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"alias": "Morning Routine",
|
||||
"description": "Turn on lights in the morning",
|
||||
"trigger": [
|
||||
{
|
||||
"platform": "time",
|
||||
"at": "07:00:00"
|
||||
}
|
||||
],
|
||||
"action": [
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.bedroom"
|
||||
}
|
||||
}
|
||||
]
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Update Existing Automation
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine', {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"alias": "Morning Routine",
|
||||
"trigger": [
|
||||
{
|
||||
"platform": "time",
|
||||
"at": "07:30:00" // Updated time
|
||||
}
|
||||
],
|
||||
"action": [
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.bedroom"
|
||||
}
|
||||
}
|
||||
]
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Success Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"automation": {
|
||||
"id": "created_automation_id",
|
||||
// Full automation configuration
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Validation Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"valid": true,
|
||||
"warnings": [
|
||||
"No conditions specified"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
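
The validation endpoint listed under the REST API above can be called before saving a new or updated automation. A minimal sketch (the request body mirrors the create example earlier on this page; base URL and token follow the other examples):

```typescript
// Validate an automation configuration before creating it.
const validation = await fetch('http://your-ha-mcp/api/automations/validate', {
  method: 'POST',
  headers: {
    'Authorization': 'Bearer your_access_token',
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    "alias": "Morning Routine",
    "trigger": [{ "platform": "time", "at": "07:00:00" }],
    "action": [{ "service": "light.turn_on", "target": { "entity_id": "light.bedroom" } }]
  })
});

const { data } = await validation.json();
if (!data.valid) {
  console.warn('Automation configuration is invalid:', data);
}
```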
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Automation not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid configuration
|
||||
- `409`: Automation creation/update failed
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE",
|
||||
"validation_errors": [
|
||||
{
|
||||
"path": "trigger[0].platform",
|
||||
"message": "Invalid trigger platform"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Always validate configurations before saving
|
||||
2. Use descriptive aliases and descriptions
|
||||
3. Group related automations
|
||||
4. Test automations in a safe environment
|
||||
5. Document automation dependencies
|
||||
6. Use variables for reusable values
|
||||
7. Implement proper error handling
|
||||
8. Consider automation modes carefully
|
||||
|
||||
## See Also
|
||||
|
||||
- [Automation Management](automation.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
- [Scene Management](../history-state/scene.md)
|
||||
@@ -1,211 +0,0 @@
|
||||
# Automation Management Tool
|
||||
|
||||
The Automation Management tool provides functionality to manage and control Home Assistant automations.
|
||||
|
||||
## Features
|
||||
|
||||
- List all automations
|
||||
- Get automation details
|
||||
- Toggle automation state (enable/disable)
|
||||
- Trigger automations manually
|
||||
- Monitor automation execution
|
||||
- View automation history
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/automations
|
||||
GET /api/automations/{automation_id}
|
||||
POST /api/automations/{automation_id}/toggle
|
||||
POST /api/automations/{automation_id}/trigger
|
||||
GET /api/automations/{automation_id}/history
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// List automations
|
||||
{
|
||||
"type": "get_automations"
|
||||
}
|
||||
|
||||
// Toggle automation
|
||||
{
|
||||
"type": "toggle_automation",
|
||||
"automation_id": "required_automation_id"
|
||||
}
|
||||
|
||||
// Trigger automation
|
||||
{
|
||||
"type": "trigger_automation",
|
||||
"automation_id": "required_automation_id",
|
||||
"variables": {
|
||||
// Optional variables
|
||||
}
|
||||
}
|
||||
```
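
A short sketch of sending these messages over a WebSocket connection. The WebSocket URL and the use of the Node `ws` package are assumptions for illustration; the message payloads match the shapes documented above.

```typescript
// Sketch only: the WebSocket endpoint path is an assumption, not specified in this document.
import WebSocket from 'ws';

const ws = new WebSocket('ws://your-ha-mcp/ws');

ws.on('open', () => {
  // List automations, then toggle one, using the documented message shapes.
  ws.send(JSON.stringify({ type: 'get_automations' }));
  ws.send(JSON.stringify({ type: 'toggle_automation', automation_id: 'morning_routine' }));
});

ws.on('message', (raw) => {
  const message = JSON.parse(raw.toString());
  console.log('Server message:', message);
});

ws.on('error', (err) => console.error('WebSocket error:', err));
```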
|
||||
|
||||
## Examples
|
||||
|
||||
### List All Automations
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/automations', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const automations = await response.json();
|
||||
```
|
||||
|
||||
### Toggle Automation State
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine/toggle', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Trigger Automation Manually
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine/trigger', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"variables": {
|
||||
"brightness": 100,
|
||||
"temperature": 22
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Automation List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"automations": [
|
||||
{
|
||||
"id": "automation_id",
|
||||
"name": "Automation Name",
|
||||
"enabled": true,
|
||||
"last_triggered": "2024-02-05T12:00:00Z",
|
||||
"trigger_count": 42
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Automation Details Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"automation": {
|
||||
"id": "automation_id",
|
||||
"name": "Automation Name",
|
||||
"enabled": true,
|
||||
"triggers": [
|
||||
{
|
||||
"platform": "time",
|
||||
"at": "07:00:00"
|
||||
}
|
||||
],
|
||||
"conditions": [],
|
||||
"actions": [
|
||||
{
|
||||
"service": "light.turn_on",
|
||||
"target": {
|
||||
"entity_id": "light.bedroom"
|
||||
}
|
||||
}
|
||||
],
|
||||
"mode": "single",
|
||||
"max": 10,
|
||||
"last_triggered": "2024-02-05T12:00:00Z",
|
||||
"trigger_count": 42
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Automation History Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"history": [
|
||||
{
|
||||
"timestamp": "2024-02-05T12:00:00Z",
|
||||
"trigger": {
|
||||
"platform": "time",
|
||||
"at": "07:00:00"
|
||||
},
|
||||
"context": {
|
||||
"user_id": "user_123",
|
||||
"variables": {}
|
||||
},
|
||||
"result": "success"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Automation not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request
|
||||
- `409`: Automation execution failed
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 50 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `AUTOMATION_RATE_LIMIT`
|
||||
- `AUTOMATION_RATE_WINDOW`
|
||||
|
||||
## Best Practices

1. Monitor automation execution history
2. Use descriptive automation names
3. Implement proper error handling
4. Cache automation configurations when possible
5. Handle rate limiting gracefully (see the sketch after this list)
6. Test automations before enabling
7. Use variables for flexible automation behavior
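
For practice 5, a minimal sketch of backing off when the documented limit (50 requests per 15 minutes) is hit. The `Retry-After` header is an assumption; if the server does not send one, the fallback delay applies.

```typescript
// Sketch only: assumes a 429 status on rate limiting; the Retry-After header is not guaranteed.
async function fetchWithRateLimitRetry(url: string, init: RequestInit, maxAttempts = 3): Promise<Response> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const response = await fetch(url, init);
    if (response.status !== 429) {
      return response;
    }
    // Prefer the server's hint if present, otherwise wait 30 seconds.
    const retryAfter = Number(response.headers.get('Retry-After')) || 30;
    console.warn(`Rate limited, retrying in ${retryAfter}s (attempt ${attempt}/${maxAttempts})`);
    await new Promise(resolve => setTimeout(resolve, retryAfter * 1000));
  }
  throw new Error(`Still rate limited after ${maxAttempts} attempts: ${url}`);
}
```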
|
||||
|
||||
## See Also
|
||||
|
||||
- [Automation Configuration](automation-config.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
@@ -1,195 +0,0 @@
|
||||
# Device Control Tool
|
||||
|
||||
The Device Control tool provides functionality to control various types of devices in your Home Assistant instance.
|
||||
|
||||
## Supported Device Types
|
||||
|
||||
- Lights
|
||||
- Switches
|
||||
- Covers
|
||||
- Climate devices
|
||||
- Media players
|
||||
- And more...
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
POST /api/devices/{device_id}/control
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
{
|
||||
"type": "control_device",
|
||||
"device_id": "required_device_id",
|
||||
"domain": "required_domain",
|
||||
"service": "required_service",
|
||||
"data": {
|
||||
// Service-specific data
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Domain-Specific Commands
|
||||
|
||||
### Lights
|
||||
|
||||
```typescript
|
||||
// Turn on/off
|
||||
POST /api/devices/light/{device_id}/control
|
||||
{
|
||||
"service": "turn_on", // or "turn_off"
|
||||
}
|
||||
|
||||
// Set brightness
|
||||
{
|
||||
"service": "turn_on",
|
||||
"data": {
|
||||
"brightness": 255 // 0-255
|
||||
}
|
||||
}
|
||||
|
||||
// Set color
|
||||
{
|
||||
"service": "turn_on",
|
||||
"data": {
|
||||
"rgb_color": [255, 0, 0] // Red
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Covers
|
||||
|
||||
```typescript
|
||||
// Open/close
|
||||
POST /api/devices/cover/{device_id}/control
|
||||
{
|
||||
"service": "open_cover", // or "close_cover"
|
||||
}
|
||||
|
||||
// Set position
|
||||
{
|
||||
"service": "set_cover_position",
|
||||
"data": {
|
||||
"position": 50 // 0-100
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Climate
|
||||
|
||||
```typescript
|
||||
// Set temperature
|
||||
POST /api/devices/climate/{device_id}/control
|
||||
{
|
||||
"service": "set_temperature",
|
||||
"data": {
|
||||
"temperature": 22.5
|
||||
}
|
||||
}
|
||||
|
||||
// Set mode
|
||||
{
|
||||
"service": "set_hvac_mode",
|
||||
"data": {
|
||||
"hvac_mode": "heat" // heat, cool, auto, off
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Control Light Brightness
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/devices/light/living_room/control', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"service": "turn_on",
|
||||
"data": {
|
||||
"brightness": 128
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Control Cover Position
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/devices/cover/bedroom/control', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"service": "set_cover_position",
|
||||
"data": {
|
||||
"position": 75
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Success Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
// Updated device attributes
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Device not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid service or parameters
|
||||
- `409`: Device unavailable or offline
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 100 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `DEVICE_CONTROL_RATE_LIMIT`
|
||||
- `DEVICE_CONTROL_RATE_WINDOW`
|
||||
|
||||
## Best Practices

1. Validate device availability before sending commands
2. Implement proper error handling
3. Use appropriate retry strategies for failed commands (see the sketch after this list)
4. Cache device capabilities when possible
5. Handle rate limiting gracefully
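
For practice 3, a small sketch of retrying a failed device command with exponential backoff. It follows the domain-specific control endpoint shown in the examples above; the retry policy itself (three attempts, doubling delay) is an illustrative choice, not a server requirement.

```typescript
// Sketch only: the retry counts and delays are illustrative defaults.
async function controlDeviceWithRetry(
  domain: string,
  deviceId: string,
  payload: { service: string; data?: Record<string, unknown> },
  token: string,
  maxAttempts = 3
) {
  let delayMs = 500;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const response = await fetch(`http://your-ha-mcp/api/devices/${domain}/${deviceId}/control`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${token}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    });
    if (response.ok) {
      return response.json();
    }
    // A 409 means the device is unavailable or offline; retrying may help once it reconnects.
    if (attempt < maxAttempts) {
      await new Promise(resolve => setTimeout(resolve, delayMs));
      delayMs *= 2; // exponential backoff
    }
  }
  throw new Error(`Device command failed after ${maxAttempts} attempts`);
}
```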
|
||||
|
||||
## See Also
|
||||
|
||||
- [List Devices](list-devices.md)
|
||||
- [Device History](../history-state/history.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
@@ -1,139 +0,0 @@
|
||||
# List Devices Tool
|
||||
|
||||
The List Devices tool provides functionality to retrieve and manage device information from your Home Assistant instance.
|
||||
|
||||
## Features
|
||||
|
||||
- List all available Home Assistant devices
|
||||
- Group devices by domain
|
||||
- Get device states and attributes
|
||||
- Filter devices by various criteria
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/devices
|
||||
GET /api/devices/{domain}
|
||||
GET /api/devices/{device_id}/state
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// List all devices
|
||||
{
|
||||
"type": "list_devices",
|
||||
"domain": "optional_domain"
|
||||
}
|
||||
|
||||
// Get device state
|
||||
{
|
||||
"type": "get_device_state",
|
||||
"device_id": "required_device_id"
|
||||
}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
#### List All Devices
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/devices', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const devices = await response.json();
|
||||
```
|
||||
|
||||
#### Get Devices by Domain
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/devices/light', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const lightDevices = await response.json();
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Device List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"devices": [
|
||||
{
|
||||
"id": "device_id",
|
||||
"name": "Device Name",
|
||||
"domain": "light",
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255,
|
||||
"color_temp": 370
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Device State Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255,
|
||||
"color_temp": 370
|
||||
},
|
||||
"last_changed": "2024-02-05T12:00:00Z",
|
||||
"last_updated": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Device not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request parameters
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 100 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `DEVICE_LIST_RATE_LIMIT`
|
||||
- `DEVICE_LIST_RATE_WINDOW`
|
||||
|
||||
## Best Practices

1. Cache device lists when possible (see the sketch after this list)
2. Use domain filtering for better performance
3. Implement proper error handling
4. Handle rate limiting gracefully
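
For practice 1, a minimal in-memory cache around the documented device list endpoints. The 60-second TTL is an illustrative value; tune it to how often your device inventory changes.

```typescript
// Sketch only: a simple in-memory cache with a time-to-live; the TTL value is illustrative.
interface CacheEntry {
  fetchedAt: number;
  devices: unknown;
}

const deviceCache = new Map<string, CacheEntry>();
const TTL_MS = 60_000;

async function listDevicesCached(token: string, domain?: string) {
  const key = domain ?? 'all';
  const cached = deviceCache.get(key);
  if (cached && Date.now() - cached.fetchedAt < TTL_MS) {
    return cached.devices;
  }

  const url = domain
    ? `http://your-ha-mcp/api/devices/${domain}`
    : 'http://your-ha-mcp/api/devices';
  const response = await fetch(url, {
    headers: { 'Authorization': `Bearer ${token}` }
  });
  const devices = await response.json();

  deviceCache.set(key, { fetchedAt: Date.now(), devices });
  return devices;
}
```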
|
||||
|
||||
## See Also
|
||||
|
||||
- [Device Control](control.md)
|
||||
- [Device History](../history-state/history.md)
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
@@ -1,251 +0,0 @@
|
||||
# SSE Statistics Tool
|
||||
|
||||
The SSE Statistics tool provides functionality to monitor and analyze Server-Sent Events (SSE) connections and performance in your Home Assistant MCP instance.
|
||||
|
||||
## Features
|
||||
|
||||
- Monitor active SSE connections
|
||||
- Track connection statistics
|
||||
- Analyze event delivery
|
||||
- Monitor resource usage
|
||||
- Connection management
|
||||
- Performance metrics
|
||||
- Historical data
|
||||
- Alert configuration
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/sse/stats
|
||||
GET /api/sse/connections
|
||||
GET /api/sse/connections/{connection_id}
|
||||
GET /api/sse/metrics
|
||||
GET /api/sse/history
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// Get SSE stats
|
||||
{
|
||||
"type": "get_sse_stats"
|
||||
}
|
||||
|
||||
// Get connection details
|
||||
{
|
||||
"type": "get_sse_connection",
|
||||
"connection_id": "required_connection_id"
|
||||
}
|
||||
|
||||
// Get performance metrics
|
||||
{
|
||||
"type": "get_sse_metrics",
|
||||
"period": "1h|24h|7d|30d"
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Get Current Statistics
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/sse/stats', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const stats = await response.json();
|
||||
```
|
||||
|
||||
### Get Connection Details
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/sse/connections/conn_123', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const connection = await response.json();
|
||||
```
|
||||
|
||||
### Get Performance Metrics
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/sse/metrics?period=24h', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const metrics = await response.json();
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Statistics Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"active_connections": 42,
|
||||
"total_events_sent": 12345,
|
||||
"events_per_second": 5.2,
|
||||
"memory_usage": 128974848,
|
||||
"cpu_usage": 2.5,
|
||||
"uptime": "PT24H",
|
||||
"event_backlog": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Connection Details Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"connection": {
|
||||
"id": "conn_123",
|
||||
"client_id": "client_456",
|
||||
"user_id": "user_789",
|
||||
"connected_at": "2024-02-05T12:00:00Z",
|
||||
"last_event_at": "2024-02-05T12:05:00Z",
|
||||
"events_sent": 150,
|
||||
"subscriptions": [
|
||||
{
|
||||
"event_type": "state_changed",
|
||||
"entity_id": "light.living_room"
|
||||
}
|
||||
],
|
||||
"state": "active",
|
||||
"ip_address": "192.168.1.100",
|
||||
"user_agent": "Mozilla/5.0 ..."
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Performance Metrics Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"metrics": {
|
||||
"connections": {
|
||||
"current": 42,
|
||||
"max": 100,
|
||||
"average": 35.5
|
||||
},
|
||||
"events": {
|
||||
"total": 12345,
|
||||
"rate": {
|
||||
"current": 5.2,
|
||||
"max": 15.0,
|
||||
"average": 4.8
|
||||
}
|
||||
},
|
||||
"latency": {
|
||||
"p50": 15,
|
||||
"p95": 45,
|
||||
"p99": 100
|
||||
},
|
||||
"resources": {
|
||||
"memory": {
|
||||
"current": 128974848,
|
||||
"max": 536870912
|
||||
},
|
||||
"cpu": {
|
||||
"current": 2.5,
|
||||
"max": 10.0,
|
||||
"average": 3.2
|
||||
}
|
||||
}
|
||||
},
|
||||
"period": "24h",
|
||||
"timestamp": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Connection not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request parameters
|
||||
- `503`: Service overloaded
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Monitoring Metrics
|
||||
|
||||
### Connection Metrics
|
||||
- Active connections
|
||||
- Connection duration
|
||||
- Connection state
|
||||
- Client information
|
||||
- Geographic distribution
|
||||
- Protocol version
|
||||
|
||||
### Event Metrics
|
||||
- Events per second
|
||||
- Event types distribution
|
||||
- Delivery success rate
|
||||
- Event latency
|
||||
- Queue size
|
||||
- Backlog size
|
||||
|
||||
### Resource Metrics
|
||||
- Memory usage
|
||||
- CPU usage
|
||||
- Network bandwidth
|
||||
- Disk I/O
|
||||
- Connection pool status
|
||||
- Thread pool status
|
||||
|
||||
## Alert Thresholds
|
||||
|
||||
- Connection limits
|
||||
- Event rate limits
|
||||
- Resource usage limits
|
||||
- Latency thresholds
|
||||
- Error rate thresholds
|
||||
- Backlog thresholds
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Monitor connection health
|
||||
2. Track resource usage
|
||||
3. Set up alerts
|
||||
4. Analyze usage patterns
|
||||
5. Optimize performance
|
||||
6. Plan capacity
|
||||
7. Implement failover
|
||||
8. Regular maintenance
|
||||
|
||||
## Performance Optimization

- Connection pooling
- Event batching (see the sketch after this list)
- Resource throttling
- Load balancing
- Cache optimization
- Connection cleanup
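
A small sketch of the event batching idea: buffering outgoing events and flushing them to a client in one write on a fixed interval. The 250 ms flush interval and the `send` callback are illustrative stand-ins for however your SSE layer actually writes to clients.

```typescript
// Sketch only: the flush interval and the send callback are illustrative stand-ins.
class EventBatcher<T> {
  private buffer: T[] = [];
  private timer: ReturnType<typeof setInterval>;

  constructor(private send: (batch: T[]) => void, flushIntervalMs = 250) {
    this.timer = setInterval(() => this.flush(), flushIntervalMs);
  }

  add(event: T): void {
    this.buffer.push(event);
  }

  flush(): void {
    if (this.buffer.length === 0) return;
    const batch = this.buffer;
    this.buffer = [];
    this.send(batch); // one write instead of one per event
  }

  stop(): void {
    clearInterval(this.timer);
    this.flush();
  }
}

// Usage: batch state_changed events before writing them to an SSE response.
const batcher = new EventBatcher<object>(batch => {
  console.log(`Flushing ${batch.length} events`);
});
batcher.add({ event_type: 'state_changed', entity_id: 'light.living_room' });
```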
|
||||
|
||||
## See Also
|
||||
|
||||
- [Event Subscription](subscribe-events.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Automation Management](../automation/automation.md)
|
||||
@@ -1,253 +0,0 @@
|
||||
# Event Subscription Tool
|
||||
|
||||
The Event Subscription tool provides functionality to subscribe to and monitor real-time events from your Home Assistant instance.
|
||||
|
||||
## Features
|
||||
|
||||
- Subscribe to Home Assistant events
|
||||
- Monitor specific entities
|
||||
- Domain-based monitoring
|
||||
- Event filtering
|
||||
- Real-time updates
|
||||
- Event history
|
||||
- Custom event handling
|
||||
- Connection management
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
POST /api/events/subscribe
|
||||
DELETE /api/events/unsubscribe
|
||||
GET /api/events/subscriptions
|
||||
GET /api/events/history
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// Subscribe to events
|
||||
{
|
||||
"type": "subscribe_events",
|
||||
"event_type": "optional_event_type",
|
||||
"entity_id": "optional_entity_id",
|
||||
"domain": "optional_domain"
|
||||
}
|
||||
|
||||
// Unsubscribe from events
|
||||
{
|
||||
"type": "unsubscribe_events",
|
||||
"subscription_id": "required_subscription_id"
|
||||
}
|
||||
```
|
||||
|
||||
### Server-Sent Events (SSE)
|
||||
|
||||
```typescript
|
||||
GET /api/events/stream?event_type=state_changed&entity_id=light.living_room
|
||||
```
|
||||
|
||||
## Event Types

- `state_changed`: Entity state changes
- `automation_triggered`: Automation executions
- `scene_activated`: Scene activations
- `device_registered`: New device registrations
- `service_registered`: New service registrations
- `homeassistant_start`: System startup
- `homeassistant_stop`: System shutdown
- Custom events (see the example after this list)
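
Custom events are subscribed to the same way as built-in ones: pass the custom event name as `event_type`. The event name below is made up for illustration.

```typescript
// Sketch only: "my_custom_event" is a made-up event name for illustration.
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
  method: 'POST',
  headers: {
    'Authorization': 'Bearer your_access_token',
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    "event_type": "my_custom_event"
  })
});
```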
|
||||
|
||||
## Examples
|
||||
|
||||
### Subscribe to All State Changes
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"event_type": "state_changed"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Monitor Specific Entity
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"event_type": "state_changed",
|
||||
"entity_id": "light.living_room"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Domain-Based Monitoring
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"event_type": "state_changed",
|
||||
"domain": "light"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### SSE Connection Example
|
||||
|
||||
```typescript
// Note: the browser-native EventSource API does not accept custom headers.
// This example assumes the Node `eventsource` package (v1.x), whose constructor
// takes an options object with a `headers` field; in the browser, pass the token
// another way (for example via an EventSource polyfill that supports headers).
import EventSource from 'eventsource';

const eventSource = new EventSource(
  'http://your-ha-mcp/api/events/stream?event_type=state_changed&entity_id=light.living_room',
  {
    headers: {
      'Authorization': 'Bearer your_access_token'
    }
  }
);

eventSource.onmessage = (event) => {
  const data = JSON.parse(event.data);
  console.log('Event received:', data);
};

eventSource.onerror = (error) => {
  console.error('SSE error:', error);
  eventSource.close();
};
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Subscription Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"subscription_id": "sub_123",
|
||||
"event_type": "state_changed",
|
||||
"entity_id": "light.living_room",
|
||||
"created_at": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Event Message Format
|
||||
|
||||
```json
|
||||
{
|
||||
"event_type": "state_changed",
|
||||
"entity_id": "light.living_room",
|
||||
"data": {
|
||||
"old_state": {
|
||||
"state": "off",
|
||||
"attributes": {},
|
||||
"last_changed": "2024-02-05T11:55:00Z"
|
||||
},
|
||||
"new_state": {
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255
|
||||
},
|
||||
"last_changed": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
},
|
||||
"origin": "LOCAL",
|
||||
"time_fired": "2024-02-05T12:00:00Z",
|
||||
"context": {
|
||||
"id": "context_123",
|
||||
"parent_id": null,
|
||||
"user_id": "user_123"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Subscriptions List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"subscriptions": [
|
||||
{
|
||||
"id": "sub_123",
|
||||
"event_type": "state_changed",
|
||||
"entity_id": "light.living_room",
|
||||
"created_at": "2024-02-05T12:00:00Z",
|
||||
"last_event": "2024-02-05T12:05:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Event type not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid subscription parameters
|
||||
- `409`: Subscription already exists
|
||||
- `429`: Too many subscriptions
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limits:
|
||||
- Maximum subscriptions: 100 per client
|
||||
- Maximum event rate: 1000 events per minute
|
||||
- Configurable through environment variables:
|
||||
- `EVENT_SUB_MAX_SUBSCRIPTIONS`
|
||||
- `EVENT_SUB_RATE_LIMIT`
|
||||
- `EVENT_SUB_RATE_WINDOW`
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Use specific event types when possible
|
||||
2. Implement proper error handling
|
||||
3. Handle connection interruptions
|
||||
4. Process events asynchronously
|
||||
5. Implement backoff strategies
|
||||
6. Monitor subscription health
|
||||
7. Clean up unused subscriptions
|
||||
8. Handle rate limiting gracefully
|
||||
|
||||
## Connection Management

- Implement heartbeat monitoring
- Use reconnection strategies (see the sketch after this list)
- Handle connection timeouts
- Monitor connection quality
- Implement fallback mechanisms
- Clean up resources properly
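
A compact sketch of the reconnection item: reopening the SSE stream with exponential backoff and a cap. It assumes an EventSource-compatible constructor is available (native in browsers, or via the Node `eventsource` package); the backoff constants are illustrative.

```typescript
// Sketch only: backoff constants are illustrative; EventSource is browser-native
// (use the `eventsource` npm package in Node).
function connectWithBackoff(url: string, onEvent: (data: unknown) => void): void {
  let delayMs = 1_000;
  const maxDelayMs = 30_000;

  const open = () => {
    const source = new EventSource(url);

    source.onopen = () => {
      delayMs = 1_000; // reset backoff after a successful connection
    };

    source.onmessage = (event) => {
      onEvent(JSON.parse(event.data));
    };

    source.onerror = () => {
      source.close();
      setTimeout(open, delayMs);
      delayMs = Math.min(delayMs * 2, maxDelayMs); // exponential backoff with a cap
    };
  };

  open();
}

connectWithBackoff(
  'http://your-ha-mcp/api/events/stream?event_type=state_changed',
  (data) => console.log('Event received:', data)
);
```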
|
||||
|
||||
## See Also
|
||||
|
||||
- [SSE Statistics](sse-stats.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Automation Management](../automation/automation.md)
|
||||
@@ -1,167 +0,0 @@
|
||||
# Device History Tool
|
||||
|
||||
The Device History tool allows you to retrieve historical state information for devices in your Home Assistant instance.
|
||||
|
||||
## Features
|
||||
|
||||
- Fetch device state history
|
||||
- Filter by time range
|
||||
- Get significant changes
|
||||
- Aggregate data by time periods
|
||||
- Export historical data
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/history/{device_id}
|
||||
GET /api/history/{device_id}/period/{start_time}
|
||||
GET /api/history/{device_id}/period/{start_time}/{end_time}
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
{
|
||||
"type": "get_history",
|
||||
"device_id": "required_device_id",
|
||||
"start_time": "optional_iso_timestamp",
|
||||
"end_time": "optional_iso_timestamp",
|
||||
"significant_changes_only": false
|
||||
}
|
||||
```
|
||||
|
||||
## Query Parameters

| Parameter | Type | Description |
|-----------|------|-------------|
| `start_time` | ISO timestamp | Start of the period to fetch history for |
| `end_time` | ISO timestamp | End of the period to fetch history for |
| `significant_changes_only` | boolean | Only return significant state changes |
| `minimal_response` | boolean | Return minimal state information |
| `no_attributes` | boolean | Exclude attribute data from response |
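
An example combining these parameters on the history endpoint; requesting only significant changes with a minimal response keeps the payload small for long time ranges.

```typescript
const params = new URLSearchParams({
  significant_changes_only: 'true',
  minimal_response: 'true',
  no_attributes: 'true'
});
const response = await fetch(
  `http://your-ha-mcp/api/history/light.living_room?${params.toString()}`,
  {
    headers: {
      'Authorization': 'Bearer your_access_token'
    }
  }
);
const history = await response.json();
```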
|
||||
|
||||
## Examples
|
||||
|
||||
### Get Recent History
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/history/light.living_room', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const history = await response.json();
|
||||
```
|
||||
|
||||
### Get History for Specific Period
|
||||
|
||||
```typescript
|
||||
const startTime = '2024-02-01T00:00:00Z';
|
||||
const endTime = '2024-02-02T00:00:00Z';
|
||||
const response = await fetch(
|
||||
`http://your-ha-mcp/api/history/light.living_room/period/${startTime}/${endTime}`,
|
||||
{
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
}
|
||||
);
|
||||
const history = await response.json();
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### History Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"history": [
|
||||
{
|
||||
"state": "on",
|
||||
"attributes": {
|
||||
"brightness": 255
|
||||
},
|
||||
"last_changed": "2024-02-05T12:00:00Z",
|
||||
"last_updated": "2024-02-05T12:00:00Z"
|
||||
},
|
||||
{
|
||||
"state": "off",
|
||||
"last_changed": "2024-02-05T13:00:00Z",
|
||||
"last_updated": "2024-02-05T13:00:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Aggregated History Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"aggregates": {
|
||||
"daily": [
|
||||
{
|
||||
"date": "2024-02-05",
|
||||
"on_time": "PT5H30M",
|
||||
"off_time": "PT18H30M",
|
||||
"changes": 10
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Device not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid parameters
|
||||
- `416`: Time range too large
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 50 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `HISTORY_RATE_LIMIT`
|
||||
- `HISTORY_RATE_WINDOW`
|
||||
|
||||
## Data Retention
|
||||
|
||||
- Default retention period: 30 days
|
||||
- Configurable through environment variables:
|
||||
- `HISTORY_RETENTION_DAYS`
|
||||
- Older data may be automatically aggregated
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Use appropriate time ranges to avoid large responses
|
||||
2. Enable `significant_changes_only` for better performance
|
||||
3. Use `minimal_response` when full state data isn't needed
|
||||
4. Implement proper error handling
|
||||
5. Cache frequently accessed historical data
|
||||
6. Handle rate limiting gracefully
|
||||
|
||||
## See Also
|
||||
|
||||
- [List Devices](../device-management/list-devices.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Scene Management](scene.md)
|
||||
@@ -1,215 +0,0 @@
|
||||
# Scene Management Tool
|
||||
|
||||
The Scene Management tool provides functionality to manage and control scenes in your Home Assistant instance.
|
||||
|
||||
## Features
|
||||
|
||||
- List available scenes
|
||||
- Activate scenes
|
||||
- Create new scenes
|
||||
- Update existing scenes
|
||||
- Delete scenes
|
||||
- Get scene state information
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
GET /api/scenes
|
||||
GET /api/scenes/{scene_id}
|
||||
POST /api/scenes/{scene_id}/activate
|
||||
POST /api/scenes
|
||||
PUT /api/scenes/{scene_id}
|
||||
DELETE /api/scenes/{scene_id}
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// List scenes
|
||||
{
|
||||
"type": "get_scenes"
|
||||
}
|
||||
|
||||
// Activate scene
|
||||
{
|
||||
"type": "activate_scene",
|
||||
"scene_id": "required_scene_id"
|
||||
}
|
||||
|
||||
// Create/Update scene
|
||||
{
|
||||
"type": "create_scene",
|
||||
"scene": {
|
||||
"name": "required_scene_name",
|
||||
"entities": {
|
||||
// Entity states
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Scene Configuration
|
||||
|
||||
### Scene Definition
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Movie Night",
|
||||
"entities": {
|
||||
"light.living_room": {
|
||||
"state": "on",
|
||||
"brightness": 50,
|
||||
"color_temp": 2700
|
||||
},
|
||||
"cover.living_room": {
|
||||
"state": "closed"
|
||||
},
|
||||
"media_player.tv": {
|
||||
"state": "on",
|
||||
"source": "HDMI 1"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### List All Scenes
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/scenes', {
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
const scenes = await response.json();
|
||||
```
|
||||
|
||||
### Activate a Scene
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/scenes/movie_night/activate', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token'
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Create a New Scene
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/scenes', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"name": "Movie Night",
|
||||
"entities": {
|
||||
"light.living_room": {
|
||||
"state": "on",
|
||||
"brightness": 50
|
||||
},
|
||||
"cover.living_room": {
|
||||
"state": "closed"
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Scene List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"scenes": [
|
||||
{
|
||||
"id": "scene_id",
|
||||
"name": "Scene Name",
|
||||
"entities": {
|
||||
// Entity configurations
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Scene Activation Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"scene_id": "activated_scene_id",
|
||||
"status": "activated",
|
||||
"timestamp": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Scene not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid scene configuration
|
||||
- `409`: Scene activation failed
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 50 requests per 15 minutes
|
||||
- Configurable through environment variables:
|
||||
- `SCENE_RATE_LIMIT`
|
||||
- `SCENE_RATE_WINDOW`
|
||||
|
||||
## Best Practices

1. Validate entity availability before creating scenes (see the sketch after this list)
2. Use meaningful scene names
3. Group related entities in scenes
4. Implement proper error handling
5. Cache scene configurations when possible
6. Handle rate limiting gracefully
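
For practice 1, a sketch that checks each entity's current state before creating the scene, using the device state endpoint documented in the List Devices tool. It assumes device IDs correspond to entity IDs (e.g. `light.living_room`), and treating an `unavailable` state as a blocker is an illustrative policy choice.

```typescript
// Sketch only: skipping scene creation on any "unavailable" entity is an illustrative policy.
async function createSceneIfEntitiesAvailable(
  scene: { name: string; entities: Record<string, unknown> },
  token: string
) {
  const headers = { 'Authorization': `Bearer ${token}` };

  for (const entityId of Object.keys(scene.entities)) {
    const stateResponse = await fetch(`http://your-ha-mcp/api/devices/${entityId}/state`, { headers });
    const state = await stateResponse.json();
    if (!stateResponse.ok || state.data?.state === 'unavailable') {
      throw new Error(`Entity ${entityId} is unavailable; scene not created`);
    }
  }

  return fetch('http://your-ha-mcp/api/scenes', {
    method: 'POST',
    headers: { ...headers, 'Content-Type': 'application/json' },
    body: JSON.stringify(scene)
  }).then(r => r.json());
}
```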
|
||||
|
||||
## Scene Transitions
|
||||
|
||||
Scenes can include transition settings for smooth state changes:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Sunset Mode",
|
||||
"entities": {
|
||||
"light.living_room": {
|
||||
"state": "on",
|
||||
"brightness": 128,
|
||||
"transition": 5 // 5 seconds
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Device History](history.md)
|
||||
- [Automation Management](../automation/automation.md)
|
||||
@@ -1,42 +0,0 @@
|
||||
# Tools Overview

The Home Assistant MCP Server provides a variety of tools to help you manage and interact with your home automation system.

## Available Tools

### Device Management
- [List Devices](device-management/list-devices.md) - View and manage connected devices
- [Device Control](device-management/control.md) - Control device states and settings

### History & State
- [History](history-state/history.md) - View and analyze historical data
- [Scene Management](history-state/scene.md) - Create and manage scenes

### Automation
- [Automation Management](automation/automation.md) - Create and manage automations
- [Automation Configuration](automation/automation-config.md) - Configure automation settings

### Add-ons & Packages
- [Add-on Management](addons-packages/addon.md) - Manage server add-ons
- [Package Management](addons-packages/package.md) - Handle package installations

### Notifications
- [Notify](notifications/notify.md) - Send and manage notifications

### Events
- [Event Subscription](events/subscribe-events.md) - Subscribe to system events
- [SSE Statistics](events/sse-stats.md) - Monitor Server-Sent Events statistics

## Getting Started

To get started with these tools:

1. Ensure you have the MCP Server properly installed and configured
2. Check the specific tool documentation for detailed usage instructions
3. Use the API endpoints or command-line interface as needed

## Next Steps

- Review the [API Documentation](../api/index.md) for programmatic access
- Check [Configuration](../config/index.md) for tool-specific settings
- See [Examples](../examples/index.md) for practical use cases
@@ -1,249 +0,0 @@
|
||||
# Notification Tool
|
||||
|
||||
The Notification tool provides functionality to send notifications through various services in your Home Assistant instance.
|
||||
|
||||
## Features
|
||||
|
||||
- Send notifications
|
||||
- Support for multiple notification services
|
||||
- Custom notification data
|
||||
- Rich media support
|
||||
- Notification templates
|
||||
- Delivery tracking
|
||||
- Priority levels
|
||||
- Notification groups
|
||||
|
||||
## Usage
|
||||
|
||||
### REST API
|
||||
|
||||
```typescript
|
||||
POST /api/notify
|
||||
POST /api/notify/{service_id}
|
||||
GET /api/notify/services
|
||||
GET /api/notify/history
|
||||
```
|
||||
|
||||
### WebSocket
|
||||
|
||||
```typescript
|
||||
// Send notification
|
||||
{
|
||||
"type": "send_notification",
|
||||
"service": "required_service_id",
|
||||
"message": "required_message",
|
||||
"title": "optional_title",
|
||||
"data": {
|
||||
// Service-specific data
|
||||
}
|
||||
}
|
||||
|
||||
// Get notification services
|
||||
{
|
||||
"type": "get_notification_services"
|
||||
}
|
||||
```
|
||||
|
||||
## Supported Services
|
||||
|
||||
- Mobile App
|
||||
- Email
|
||||
- SMS
|
||||
- Telegram
|
||||
- Discord
|
||||
- Slack
|
||||
- Push Notifications
|
||||
- Custom Services
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Notification
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/notify/mobile_app', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"message": "Motion detected in living room",
|
||||
"title": "Security Alert"
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Rich Notification
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/notify/mobile_app', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"message": "Motion detected in living room",
|
||||
"title": "Security Alert",
|
||||
"data": {
|
||||
"image": "https://your-camera-snapshot.jpg",
|
||||
"actions": [
|
||||
{
|
||||
"action": "view_camera",
|
||||
"title": "View Camera"
|
||||
},
|
||||
{
|
||||
"action": "dismiss",
|
||||
"title": "Dismiss"
|
||||
}
|
||||
],
|
||||
"priority": "high",
|
||||
"ttl": 3600,
|
||||
"group": "security"
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Service-Specific Example (Telegram)
|
||||
|
||||
```typescript
|
||||
const response = await fetch('http://your-ha-mcp/api/notify/telegram', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': 'Bearer your_access_token',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
"message": "Temperature is too high!",
|
||||
"title": "Climate Alert",
|
||||
"data": {
|
||||
"parse_mode": "markdown",
|
||||
"inline_keyboard": [
|
||||
[
|
||||
{
|
||||
"text": "Turn On AC",
|
||||
"callback_data": "turn_on_ac"
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
## Response Format
|
||||
|
||||
### Success Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"notification_id": "notification_123",
|
||||
"status": "sent",
|
||||
"timestamp": "2024-02-05T12:00:00Z",
|
||||
"service": "mobile_app"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Services List Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"services": [
|
||||
{
|
||||
"id": "mobile_app",
|
||||
"name": "Mobile App",
|
||||
"enabled": true,
|
||||
"features": [
|
||||
"actions",
|
||||
"images",
|
||||
"sound"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Notification History Response
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"history": [
|
||||
{
|
||||
"id": "notification_123",
|
||||
"service": "mobile_app",
|
||||
"message": "Motion detected",
|
||||
"title": "Security Alert",
|
||||
"timestamp": "2024-02-05T12:00:00Z",
|
||||
"status": "delivered"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Codes
|
||||
|
||||
- `404`: Service not found
|
||||
- `401`: Unauthorized
|
||||
- `400`: Invalid request
|
||||
- `408`: Delivery timeout
|
||||
- `422`: Invalid notification data
|
||||
|
||||
### Error Response Format
|
||||
|
||||
```json
|
||||
{
|
||||
"success": false,
|
||||
"message": "Error description",
|
||||
"error_code": "ERROR_CODE"
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Default limit: 100 notifications per hour
|
||||
- Configurable through environment variables:
|
||||
- `NOTIFY_RATE_LIMIT`
|
||||
- `NOTIFY_RATE_WINDOW`
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Use appropriate priority levels
|
||||
2. Group related notifications
|
||||
3. Include relevant context
|
||||
4. Implement proper error handling
|
||||
5. Use templates for consistency
|
||||
6. Consider time zones
|
||||
7. Respect user preferences
|
||||
8. Handle rate limiting gracefully
|
||||
|
||||
## Notification Templates
|
||||
|
||||
```typescript
|
||||
// Template example
|
||||
{
|
||||
"template": "security_alert",
|
||||
"data": {
|
||||
"location": "living_room",
|
||||
"event_type": "motion",
|
||||
"timestamp": "2024-02-05T12:00:00Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
- [Event Subscription](../events/subscribe-events.md)
|
||||
- [Device Control](../device-management/control.md)
|
||||
- [Automation Management](../automation/automation.md)
|
||||
@@ -1,374 +0,0 @@
|
||||
---
|
||||
layout: default
|
||||
title: Troubleshooting
|
||||
nav_order: 6
|
||||
---
|
||||
|
||||
# Troubleshooting Guide 🔧
|
||||
|
||||
This guide helps you diagnose and resolve common issues with MCP Server.
|
||||
|
||||
## Quick Diagnostics
|
||||
|
||||
### Health Check
|
||||
|
||||
First, verify the server's health:
|
||||
|
||||
```bash
|
||||
curl http://localhost:3000/health
|
||||
```
|
||||
|
||||
Expected response:
|
||||
```json
|
||||
{
|
||||
"status": "healthy",
|
||||
"version": "1.0.0",
|
||||
"uptime": 3600,
|
||||
"homeAssistant": {
|
||||
"connected": true,
|
||||
"version": "2024.1.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Common Issues
|
||||
|
||||
### 1. Connection Issues
|
||||
|
||||
#### Cannot Connect to MCP Server
|
||||
|
||||
**Symptoms:**
|
||||
- Server not responding
|
||||
- Connection refused errors
|
||||
- Timeout errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Check if the server is running:
|
||||
```bash
|
||||
# For Docker installation
|
||||
docker compose ps
|
||||
|
||||
# For manual installation
|
||||
ps aux | grep mcp
|
||||
```
|
||||
|
||||
2. Verify port availability:
|
||||
```bash
|
||||
# Check if port is in use
|
||||
netstat -tuln | grep 3000
|
||||
```
|
||||
|
||||
3. Check logs:
|
||||
```bash
|
||||
# Docker logs
|
||||
docker compose logs mcp
|
||||
|
||||
# Manual installation logs
|
||||
bun run dev
|
||||
```
|
||||
|
||||
#### Home Assistant Connection Failed
|
||||
|
||||
**Symptoms:**
|
||||
- "Connection Error" in health check
|
||||
- Cannot control devices
|
||||
- State updates not working
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify Home Assistant URL and token in `.env`:
|
||||
```env
|
||||
HA_URL=http://homeassistant:8123
|
||||
HA_TOKEN=your_long_lived_access_token
|
||||
```
|
||||
|
||||
2. Test Home Assistant connection:
|
||||
```bash
|
||||
curl -H "Authorization: Bearer YOUR_HA_TOKEN" \
|
||||
http://your-homeassistant:8123/api/
|
||||
```
|
||||
|
||||
3. Check network connectivity:
|
||||
```bash
|
||||
# For Docker setup
|
||||
docker compose exec mcp ping homeassistant
|
||||
```
|
||||
|
||||
### 2. Authentication Issues
|
||||
|
||||
#### Invalid Token
|
||||
|
||||
**Symptoms:**
|
||||
- 401 Unauthorized responses
|
||||
- "Invalid token" errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Generate a new token:
|
||||
```bash
|
||||
curl -X POST http://localhost:3000/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "your_username", "password": "your_password"}'
|
||||
```
|
||||
|
||||
2. Verify token format:
|
||||
```javascript
// The Authorization header should have the form:
// Authorization: Bearer eyJhbGciOiJIUzI1NiIs...
```
|
||||
|
||||
#### Rate Limiting
|
||||
|
||||
**Symptoms:**
|
||||
- 429 Too Many Requests
|
||||
- "Rate limit exceeded" errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Check current rate limit status:
|
||||
```bash
|
||||
curl -I http://localhost:3000/api/state
|
||||
```
|
||||
|
||||
2. Adjust rate limits in configuration:
|
||||
```yaml
|
||||
security:
|
||||
rateLimit: 100 # Increase if needed
|
||||
rateLimitWindow: 60000 # Window in milliseconds
|
||||
```
|
||||
|
||||
### 3. Real-time Updates Issues
|
||||
|
||||
#### SSE Connection Drops
|
||||
|
||||
**Symptoms:**
|
||||
- Frequent disconnections
|
||||
- Missing state updates
|
||||
- EventSource errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Implement proper reconnection logic:
|
||||
```javascript
|
||||
class SSEClient {
|
||||
constructor() {
|
||||
this.connect();
|
||||
}
|
||||
|
||||
connect() {
|
||||
this.eventSource = new EventSource('/subscribe_events');
|
||||
this.eventSource.onerror = this.handleError.bind(this);
|
||||
}
|
||||
|
||||
handleError(error) {
|
||||
console.error('SSE Error:', error);
|
||||
this.eventSource.close();
|
||||
setTimeout(() => this.connect(), 1000);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
2. Check network stability:
|
||||
```bash
|
||||
# Monitor connection stability
|
||||
ping -c 100 localhost
|
||||
```
|
||||
|
||||
### 4. Performance Issues
|
||||
|
||||
#### High Latency
|
||||
|
||||
**Symptoms:**
|
||||
- Slow response times
|
||||
- Command execution delays
|
||||
- UI lag
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Enable Redis caching:
|
||||
```env
|
||||
REDIS_ENABLED=true
|
||||
REDIS_URL=redis://localhost:6379
|
||||
```
|
||||
|
||||
2. Monitor system resources:
|
||||
```bash
|
||||
# Check CPU and memory usage
|
||||
docker stats
|
||||
|
||||
# Or for manual installation
|
||||
top -p $(pgrep -f mcp)
|
||||
```
|
||||
|
||||
3. Optimize database queries and caching:
|
||||
```typescript
|
||||
// Use batch operations
|
||||
const results = await Promise.all([
|
||||
cache.get('key1'),
|
||||
cache.get('key2')
|
||||
]);
|
||||
```
|
||||
|
||||
### 5. Device Control Issues
|
||||
|
||||
#### Commands Not Executing
|
||||
|
||||
**Symptoms:**
|
||||
- Commands appear successful but no device response
|
||||
- Inconsistent device states
|
||||
- Error messages from Home Assistant
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify device availability:
|
||||
```bash
|
||||
curl http://localhost:3000/api/state/light.living_room
|
||||
```
|
||||
|
||||
2. Check command syntax:
|
||||
```bash
|
||||
# Test basic command
|
||||
curl -X POST http://localhost:3000/api/command \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"command": "Turn on living room lights"}'
|
||||
```
|
||||
|
||||
3. Review Home Assistant logs:
|
||||
```bash
|
||||
docker compose exec homeassistant journalctl -f
|
||||
```
|
||||
|
||||
## Debugging Tools
|
||||
|
||||
### Log Analysis
|
||||
|
||||
Enable debug logging:
|
||||
|
||||
```env
|
||||
LOG_LEVEL=debug
|
||||
DEBUG=mcp:*
|
||||
```
|
||||
|
||||
### Network Debugging
|
||||
|
||||
Monitor network traffic:
|
||||
|
||||
```bash
|
||||
# TCP dump for API traffic
|
||||
tcpdump -i any port 3000 -w debug.pcap
|
||||
```
|
||||
|
||||
### Performance Profiling
|
||||
|
||||
Enable performance monitoring:
|
||||
|
||||
```env
|
||||
ENABLE_METRICS=true
|
||||
METRICS_PORT=9090
|
||||
```
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you're still experiencing issues:
|
||||
|
||||
1. Check the [GitHub Issues](https://github.com/jango-blockchained/advanced-homeassistant-mcp/issues)
|
||||
2. Search [Discussions](https://github.com/jango-blockchained/advanced-homeassistant-mcp/discussions)
|
||||
3. Create a new issue with:
|
||||
- Detailed description
|
||||
- Logs
|
||||
- Configuration (sanitized)
|
||||
- Steps to reproduce
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Regular Health Checks
|
||||
|
||||
Run periodic health checks:
|
||||
|
||||
```bash
|
||||
# Create a cron job
|
||||
*/5 * * * * curl -f http://localhost:3000/health || notify-admin
|
||||
```
|
||||
|
||||
### Log Rotation
|
||||
|
||||
Configure log rotation:
|
||||
|
||||
```yaml
|
||||
logging:
|
||||
maxSize: "100m"
|
||||
maxFiles: "7d"
|
||||
compress: true
|
||||
```
|
||||
|
||||
### Backup Configuration
|
||||
|
||||
Regularly backup your configuration:
|
||||
|
||||
```bash
|
||||
# Backup script
|
||||
tar -czf mcp-backup-$(date +%Y%m%d).tar.gz \
|
||||
.env \
|
||||
config/ \
|
||||
data/
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
### General Questions
|
||||
|
||||
#### Q: What is MCP Server?
|
||||
A: MCP Server is a bridge between Home Assistant and large language models (LLMs), enabling natural language control and automation of your smart home devices.
|
||||
|
||||
#### Q: What are the system requirements?
|
||||
A: MCP Server requires:
|
||||
- Node.js 16 or higher
|
||||
- Home Assistant instance
|
||||
- 1GB RAM minimum
|
||||
- 1GB disk space
|
||||
|
||||
#### Q: How do I update MCP Server?
|
||||
A: For Docker installation:
|
||||
```bash
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
For manual installation:
|
||||
```bash
|
||||
git pull
|
||||
bun install
|
||||
bun run build
|
||||
```
|
||||
|
||||
### Integration Questions
|
||||
|
||||
#### Q: Can I use MCP Server with any Home Assistant instance?
|
||||
A: Yes, MCP Server works with any Home Assistant instance that has the REST API enabled and a valid long-lived access token.
|
||||
|
||||
#### Q: Does MCP Server support all Home Assistant integrations?
|
||||
A: MCP Server supports all Home Assistant devices and services that are accessible via the REST API.
|
||||
|
||||
### Security Questions
|
||||
|
||||
#### Q: Is my Home Assistant token secure?
|
||||
A: Yes, your Home Assistant token is stored securely and only used for authenticated communication between MCP Server and your Home Assistant instance.
|
||||
|
||||
#### Q: Can I use MCP Server remotely?
|
||||
A: Yes, but we recommend using a secure connection (HTTPS) and proper authentication when exposing MCP Server to the internet.
|
||||
|
||||
### Troubleshooting Questions
|
||||
|
||||
#### Q: Why are my device states not updating?
|
||||
A: Check:
|
||||
1. Home Assistant connection
|
||||
2. WebSocket connection status
|
||||
3. Device availability in Home Assistant
|
||||
4. Network connectivity
|
||||
|
||||
#### Q: Why are my commands not working?
|
||||
A: Verify:
|
||||
1. Command syntax
|
||||
2. Device availability
|
||||
3. User permissions
|
||||
4. Home Assistant API access
|
||||
@@ -1,96 +0,0 @@
|
||||
# Usage Guide
|
||||
|
||||
This guide explains how to use the Home Assistant MCP Server for basic device management and integration.
|
||||
|
||||
## Basic Setup
|
||||
|
||||
1. **Starting the Server:**
|
||||
- Development mode: `bun run dev`
|
||||
- Production mode: `bun run start`
|
||||
|
||||
2. **Accessing the Server:**
|
||||
- Default URL: `http://localhost:3000`
|
||||
- Ensure Home Assistant credentials are configured in `.env`
|
||||
|
||||
## Device Control
|
||||
|
||||
### REST API Interactions
|
||||
|
||||
Basic device control can be performed via the REST API:
|
||||
|
||||
```typescript
|
||||
// Turn on a light
|
||||
fetch('http://localhost:3000/api/control', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${token}`
|
||||
},
|
||||
body: JSON.stringify({
|
||||
entity_id: 'light.living_room',
|
||||
command: 'turn_on',
|
||||
parameters: { brightness: 50 }
|
||||
})
|
||||
});
|
||||
```
|
||||
|
||||
### Supported Commands
|
||||
|
||||
- `turn_on`
|
||||
- `turn_off`
|
||||
- `toggle`
|
||||
- `set_brightness`
|
||||
|
||||
### Supported Entities
|
||||
|
||||
- Lights
|
||||
- Switches
|
||||
- Climate controls
|
||||
- Media players
|
||||
|
||||
## Real-Time Updates
|
||||
|
||||
### WebSocket Connection
|
||||
|
||||
Subscribe to real-time device state changes:
|
||||
|
||||
```typescript
|
||||
const ws = new WebSocket('ws://localhost:3000/events');
|
||||
ws.onmessage = (event) => {
|
||||
const deviceUpdate = JSON.parse(event.data);
|
||||
console.log('Device state changed:', deviceUpdate);
|
||||
};
|
||||
```
|
||||
|
||||
## Authentication

All API requests require a valid JWT token in the `Authorization` header.
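
A short sketch of obtaining and using a token. The `/auth/token` route mirrors the example in the troubleshooting guide; the credential field names and the `token` field in the response are placeholders, not confirmed by this document.

```typescript
// Sketch only: field names in the request and response body are placeholders.
const tokenResponse = await fetch('http://localhost:3000/auth/token', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ username: 'your_username', password: 'your_password' })
});
const { token } = await tokenResponse.json();

// Use the JWT on subsequent API requests.
const devices = await fetch('http://localhost:3000/api/devices', {
  headers: { 'Authorization': `Bearer ${token}` }
}).then(r => r.json());
```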
|
||||
|
||||
## Limitations
|
||||
|
||||
- Basic device control only
|
||||
- Limited error handling
|
||||
- Minimal third-party integrations
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
1. Verify Home Assistant connection
|
||||
2. Check JWT token validity
|
||||
3. Ensure correct entity IDs
|
||||
4. Review server logs for detailed errors
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure the server using environment variables in `.env`:
|
||||
|
||||
```
|
||||
HA_URL=http://homeassistant:8123
|
||||
HA_TOKEN=your_home_assistant_token
|
||||
JWT_SECRET=your_jwt_secret
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Explore the [API Documentation](api.md)
|
||||
- Check [Troubleshooting Guide](troubleshooting.md)
|
||||
- Review [Contributing Guidelines](contributing.md)
|
||||
@@ -4,8 +4,6 @@ import { DOMParser, Element, Document } from '@xmldom/xmldom';
|
||||
import dotenv from 'dotenv';
|
||||
import readline from 'readline';
|
||||
import chalk from 'chalk';
|
||||
import express from 'express';
|
||||
import bodyParser from 'body-parser';
|
||||
|
||||
// Load environment variables
|
||||
dotenv.config();
|
||||
@@ -118,9 +116,8 @@ interface ModelConfig {
|
||||
// Update model listing to filter based on API key availability
|
||||
const AVAILABLE_MODELS: ModelConfig[] = [
|
||||
// OpenAI models always available
|
||||
{ name: 'gpt-4o', maxTokens: 4096, contextWindow: 128000 },
|
||||
{ name: 'gpt-4-turbo', maxTokens: 4096, contextWindow: 128000 },
|
||||
{ name: 'gpt-4', maxTokens: 8192, contextWindow: 128000 },
|
||||
{ name: 'gpt-4', maxTokens: 8192, contextWindow: 8192 },
|
||||
{ name: 'gpt-4-turbo-preview', maxTokens: 4096, contextWindow: 128000 },
|
||||
{ name: 'gpt-3.5-turbo', maxTokens: 4096, contextWindow: 16385 },
|
||||
{ name: 'gpt-3.5-turbo-16k', maxTokens: 16385, contextWindow: 16385 },
|
||||
|
||||
@@ -151,18 +148,12 @@ const logger = {
|
||||
|
||||
// Update default model selection in loadConfig
|
||||
function loadConfig(): AppConfig {
|
||||
// Use environment variable or default to gpt-4o
|
||||
const defaultModelName = process.env.OPENAI_MODEL || 'gpt-4o';
|
||||
let defaultModel = AVAILABLE_MODELS.find(m => m.name === defaultModelName);
|
||||
|
||||
// If the configured model isn't found, use gpt-4o without warning
|
||||
if (!defaultModel) {
|
||||
defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4o') || AVAILABLE_MODELS[0];
|
||||
}
|
||||
// Always use gpt-4 for now
|
||||
const defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4') || AVAILABLE_MODELS[0];
|
||||
|
||||
return {
|
||||
mcpServer: process.env.MCP_SERVER || 'http://localhost:3000',
|
||||
openaiModel: defaultModel.name, // Use the resolved model name
|
||||
openaiModel: defaultModel.name,
|
||||
maxRetries: parseInt(process.env.MAX_RETRIES || '3'),
|
||||
analysisTimeout: parseInt(process.env.ANALYSIS_TIMEOUT || '30000'),
|
||||
selectedModel: defaultModel
|
||||
@@ -194,8 +185,8 @@ async function executeMcpTool(toolName: string, parameters: Record<string, any>
|
||||
const controller = new AbortController();
|
||||
const timeoutId = setTimeout(() => controller.abort(), config.analysisTimeout);
|
||||
|
||||
// Update endpoint URL to use the same base path as schema
|
||||
const endpoint = `${config.mcpServer}/mcp/execute`;
|
||||
// Update endpoint URL to use the correct API path
|
||||
const endpoint = `${config.mcpServer}/api/mcp/execute`;
|
||||
|
||||
const response = await fetch(endpoint, {
|
||||
method: "POST",
|
||||
@@ -258,43 +249,117 @@ function isMcpExecuteResponse(obj: any): obj is McpExecuteResponse {
|
||||
(obj.success === true || typeof obj.message === 'string');
|
||||
}
|
||||
|
||||
// Add mock data for testing
|
||||
const MOCK_HA_INFO = {
|
||||
devices: {
|
||||
light: [
|
||||
{ entity_id: 'light.living_room', state: 'on', attributes: { friendly_name: 'Living Room Light', brightness: 255 } },
|
||||
{ entity_id: 'light.kitchen', state: 'off', attributes: { friendly_name: 'Kitchen Light', brightness: 0 } }
|
||||
],
|
||||
switch: [
|
||||
{ entity_id: 'switch.tv', state: 'off', attributes: { friendly_name: 'TV Power' } }
|
||||
],
|
||||
sensor: [
|
||||
{ entity_id: 'sensor.temperature', state: '21.5', attributes: { friendly_name: 'Living Room Temperature', unit_of_measurement: '°C' } },
|
||||
{ entity_id: 'sensor.humidity', state: '45', attributes: { friendly_name: 'Living Room Humidity', unit_of_measurement: '%' } }
|
||||
],
|
||||
climate: [
|
||||
{ entity_id: 'climate.thermostat', state: 'heat', attributes: { friendly_name: 'Main Thermostat', current_temperature: 20, target_temp_high: 24 } }
|
||||
]
|
||||
}
|
||||
};
|
||||
|
||||
interface HassState {
|
||||
entity_id: string;
|
||||
state: string;
|
||||
attributes: Record<string, any>;
|
||||
last_changed: string;
|
||||
last_updated: string;
|
||||
}
|
||||
|
||||
interface ServiceInfo {
|
||||
name: string;
|
||||
description: string;
|
||||
fields: Record<string, any>;
|
||||
}
|
||||
|
||||
interface ServiceDomain {
|
||||
domain: string;
|
||||
services: Record<string, ServiceInfo>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collects comprehensive information about the Home Assistant instance using MCP tools
|
||||
*/
|
||||
async function collectHomeAssistantInfo(): Promise<any> {
|
||||
const info: Record<string, any> = {};
|
||||
const config = loadConfig();
|
||||
const hassHost = process.env.HASS_HOST;
|
||||
|
||||
// Update schema endpoint to be consistent
|
||||
const schemaResponse = await fetch(`${config.mcpServer}/mcp`, {
|
||||
try {
|
||||
// Check if we're in test mode
|
||||
if (process.env.HA_TEST_MODE === '1') {
|
||||
logger.info("Running in test mode with mock data");
|
||||
return MOCK_HA_INFO;
|
||||
}
|
||||
|
||||
// Get states from Home Assistant directly
|
||||
const statesResponse = await fetch(`${hassHost}/api/states`, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${hassToken}`,
|
||||
'Accept': 'application/json'
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!schemaResponse.ok) {
|
||||
console.error(`Failed to fetch MCP schema: ${schemaResponse.status}`);
|
||||
return info;
|
||||
if (!statesResponse.ok) {
|
||||
throw new Error(`Failed to fetch states: ${statesResponse.status}`);
|
||||
}
|
||||
|
||||
const schema = await schemaResponse.json() as McpSchema;
|
||||
console.log("Available tools:", schema.tools.map(t => t.name));
|
||||
const states = await statesResponse.json() as HassState[];
|
||||
|
||||
// Execute list_devices to get basic device information
|
||||
console.log("Fetching device information...");
|
||||
try {
|
||||
const deviceInfo = await executeMcpTool('list_devices');
|
||||
if (deviceInfo && deviceInfo.success && deviceInfo.devices) {
|
||||
info.devices = deviceInfo.devices;
|
||||
// Group devices by domain
|
||||
const devices: Record<string, HassState[]> = {};
|
||||
for (const state of states) {
|
||||
const [domain] = state.entity_id.split('.');
|
||||
if (!devices[domain]) {
|
||||
devices[domain] = [];
|
||||
}
|
||||
devices[domain].push(state);
|
||||
}
|
||||
|
||||
info.devices = devices;
|
||||
info.device_summary = {
|
||||
total_devices: states.length,
|
||||
device_types: Object.keys(devices),
|
||||
by_domain: Object.fromEntries(
|
||||
Object.entries(devices).map(([domain, items]) => [domain, items.length])
|
||||
)
|
||||
};
|
||||
|
||||
const deviceCount = states.length;
|
||||
const domainCount = Object.keys(devices).length;
|
||||
|
||||
if (deviceCount > 0) {
|
||||
logger.success(`Found ${deviceCount} devices across ${domainCount} domains`);
|
||||
} else {
|
||||
console.warn(`Failed to list devices: ${deviceInfo?.message || 'Unknown error'}`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn("Error fetching devices:", error);
|
||||
logger.warn('No devices found in Home Assistant');
|
||||
}
|
||||
|
||||
return info;
|
||||
} catch (error) {
|
||||
logger.error(`Error fetching devices: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
||||
if (process.env.HA_TEST_MODE !== '1') {
|
||||
logger.warn(`Failed to connect to Home Assistant. Run with HA_TEST_MODE=1 to use test data.`);
|
||||
return {
|
||||
devices: {},
|
||||
device_summary: {
|
||||
total_devices: 0,
|
||||
device_types: [],
|
||||
by_domain: {}
|
||||
}
|
||||
};
|
||||
}
|
||||
return MOCK_HA_INFO;
|
||||
}
|
||||
}
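For reference, a self-contained sketch of the group-by-domain step used in collectHomeAssistantInfo above; the function name is illustrative and the GroupableState shape is trimmed to the fields the loop actually reads.

// Illustrative sketch (not part of the diff): group entity states by their domain prefix.
interface GroupableState {
    entity_id: string;
    state: string;
    attributes: Record<string, any>;
}

function groupByDomain(states: GroupableState[]): Record<string, GroupableState[]> {
    const devices: Record<string, GroupableState[]> = {};
    for (const state of states) {
        const [domain] = state.entity_id.split('.');
        if (!devices[domain]) {
            devices[domain] = [];
        }
        devices[domain].push(state);
    }
    return devices;
}

// Example: groupByDomain([{ entity_id: 'light.kitchen', state: 'off', attributes: {} }])
// returns { light: [ ... ] }.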
|
||||
|
||||
/**
|
||||
@@ -401,31 +466,66 @@ function getRelevantDeviceTypes(prompt: string): string[] {
|
||||
* Generates analysis and recommendations using the OpenAI API based on the Home Assistant data
|
||||
*/
|
||||
async function generateAnalysis(haInfo: any): Promise<SystemAnalysis> {
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
// Compress and summarize the data
|
||||
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
|
||||
const deviceSummary = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, any>, [domain, devices]) => {
|
||||
const deviceList = devices as any[];
|
||||
acc[domain] = {
|
||||
count: deviceList.length,
|
||||
active: deviceList.filter(d => d.state === 'on' || d.state === 'home').length,
|
||||
states: [...new Set(deviceList.map(d => d.state))],
|
||||
sample: deviceList.slice(0, 2).map(d => ({
|
||||
id: d.entity_id,
|
||||
state: d.state,
|
||||
name: d.attributes?.friendly_name
|
||||
}))
|
||||
// If in test mode, return mock analysis
|
||||
if (process.env.HA_TEST_MODE === '1') {
|
||||
logger.info("Generating mock analysis...");
|
||||
return {
|
||||
overview: {
|
||||
state: ["System running normally", "4 device types detected"],
|
||||
health: ["All systems operational", "No critical issues found"],
|
||||
configurations: ["Basic configuration detected", "Default settings in use"],
|
||||
integrations: ["Light", "Switch", "Sensor", "Climate"],
|
||||
issues: ["No major issues detected"]
|
||||
},
|
||||
performance: {
|
||||
resource_usage: ["Normal CPU usage", "Memory usage within limits"],
|
||||
response_times: ["Average response time: 0.5s"],
|
||||
optimization_areas: ["Consider grouping lights by room"]
|
||||
},
|
||||
security: {
|
||||
current_measures: ["Basic security measures in place"],
|
||||
vulnerabilities: ["No critical vulnerabilities detected"],
|
||||
recommendations: ["Enable 2FA if not already enabled"]
|
||||
},
|
||||
optimization: {
|
||||
performance_suggestions: ["Group frequently used devices"],
|
||||
config_optimizations: ["Consider creating room-based views"],
|
||||
integration_improvements: ["Add friendly names to all entities"],
|
||||
automation_opportunities: ["Create morning/evening routines"]
|
||||
},
|
||||
maintenance: {
|
||||
required_updates: ["No critical updates pending"],
|
||||
cleanup_tasks: ["Remove unused entities"],
|
||||
regular_tasks: ["Check sensor battery levels"]
|
||||
},
|
||||
entity_usage: {
|
||||
most_active: ["light.living_room", "sensor.temperature"],
|
||||
rarely_used: ["switch.tv"],
|
||||
potential_duplicates: []
|
||||
},
|
||||
automation_analysis: {
|
||||
inefficient_automations: [],
|
||||
potential_improvements: ["Add time-based light controls"],
|
||||
suggested_blueprints: ["Motion-activated lighting"],
|
||||
condition_optimizations: []
|
||||
},
|
||||
energy_management: {
|
||||
high_consumption: ["No high consumption devices detected"],
|
||||
monitoring_suggestions: ["Add power monitoring to main appliances"],
|
||||
tariff_optimizations: ["Consider time-of-use automation"]
|
||||
}
|
||||
};
|
||||
return acc;
|
||||
}, {}) : {};
|
||||
}
|
||||
|
||||
// Original analysis code for non-test mode
|
||||
const openai = getOpenAIClient();
|
||||
|
||||
const systemSummary = {
|
||||
total_devices: deviceTypes.reduce((sum, type) => sum + deviceSummary[type].count, 0),
|
||||
device_types: deviceTypes,
|
||||
device_summary: deviceSummary,
|
||||
active_devices: Object.values(deviceSummary).reduce((sum: number, info: any) => sum + info.active, 0)
|
||||
total_devices: haInfo.device_summary?.total_devices || 0,
|
||||
device_types: haInfo.device_summary?.device_types || [],
|
||||
device_summary: haInfo.device_summary?.by_domain || {}
|
||||
};
|
||||
|
||||
const prompt = `Analyze this Home Assistant system and provide insights in XML format:
|
||||
@@ -578,100 +678,92 @@ Generate your response in this EXACT format:
|
||||
}
|
||||
}
|
||||
|
||||
async function getUserInput(question: string): Promise<string> {
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout
|
||||
});
|
||||
|
||||
return new Promise((resolve) => {
|
||||
rl.question(question, (answer) => {
|
||||
rl.close();
|
||||
resolve(answer);
|
||||
});
|
||||
});
|
||||
interface AutomationConfig {
|
||||
id?: string;
|
||||
alias?: string;
|
||||
description?: string;
|
||||
trigger?: Array<{
|
||||
platform: string;
|
||||
[key: string]: any;
|
||||
}>;
|
||||
condition?: Array<{
|
||||
condition: string;
|
||||
[key: string]: any;
|
||||
}>;
|
||||
action?: Array<{
|
||||
service?: string;
|
||||
[key: string]: any;
|
||||
}>;
|
||||
mode?: string;
|
||||
}
|
||||
|
||||
// Update chunk size calculation
|
||||
const MAX_CHARACTERS = 8000; // ~2000 tokens (4 chars/token)
|
||||
|
||||
// Update model handling in retry
|
||||
async function handleCustomPrompt(haInfo: any): Promise<void> {
|
||||
try {
|
||||
// Add device metadata
|
||||
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
|
||||
const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
|
||||
acc[domain] = (devices as any[]).length;
|
||||
return acc;
|
||||
}, {}) : {};
|
||||
const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
|
||||
|
||||
const userPrompt = await getUserInput("Enter your custom prompt: ");
|
||||
if (!userPrompt) {
|
||||
console.log("No prompt provided. Exiting...");
|
||||
return;
|
||||
}
|
||||
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: config.selectedModel.name,
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
|
||||
Current system has ${totalDevices} devices across ${deviceTypes.length} types: ${JSON.stringify(deviceStates)}`
|
||||
},
|
||||
{ role: "user", content: userPrompt },
|
||||
],
|
||||
max_tokens: config.selectedModel.maxTokens,
|
||||
temperature: 0.3,
|
||||
});
|
||||
|
||||
console.log("\nAnalysis Results:\n");
|
||||
console.log(completion.choices[0].message?.content || "No response generated");
|
||||
|
||||
} catch (error) {
|
||||
console.error("Error processing custom prompt:", error);
|
||||
|
||||
// Retry with simplified prompt if there's an error
|
||||
try {
|
||||
const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
const retryCompletion = await openai.chat.completions.create({
|
||||
model: config.selectedModel.name,
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a Home Assistant expert. Provide a simple analysis of the system."
|
||||
},
|
||||
{ role: "user", content: retryPrompt },
|
||||
],
|
||||
max_tokens: config.selectedModel.maxTokens,
|
||||
temperature: 0.3,
|
||||
});
|
||||
|
||||
console.log("\nAnalysis Results:\n");
|
||||
console.log(retryCompletion.choices[0].message?.content || "No response generated");
|
||||
} catch (retryError) {
|
||||
console.error("Error during retry:", retryError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update automation handling
|
||||
async function handleAutomationOptimization(haInfo: any): Promise<void> {
|
||||
try {
|
||||
const result = await executeMcpTool('automation', { action: 'list' });
|
||||
if (!result?.success) {
|
||||
logger.error(`Failed to retrieve automations: ${result?.message || 'Unknown error'}`);
|
||||
return;
|
||||
const hassHost = process.env.HASS_HOST;
|
||||
|
||||
// Get automations directly from Home Assistant
|
||||
const automationsResponse = await fetch(`${hassHost}/api/states`, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${hassToken}`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!automationsResponse.ok) {
|
||||
throw new Error(`Failed to fetch automations: ${automationsResponse.status}`);
|
||||
}
|
||||
|
||||
const automations = result.automations || [];
|
||||
const states = await automationsResponse.json() as HassState[];
|
||||
const automations = states.filter(state => state.entity_id.startsWith('automation.'));
|
||||
|
||||
// Get services to understand what actions are available
|
||||
const servicesResponse = await fetch(`${hassHost}/api/services`, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${hassToken}`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
let availableServices: Record<string, any> = {};
|
||||
if (servicesResponse.ok) {
|
||||
const services = await servicesResponse.json() as ServiceDomain[];
|
||||
availableServices = services.reduce((acc: Record<string, any>, service: ServiceDomain) => {
|
||||
if (service.domain && service.services) {
|
||||
acc[service.domain] = service.services;
|
||||
}
|
||||
return acc;
|
||||
}, {});
|
||||
logger.debug(`Retrieved services from ${Object.keys(availableServices).length} domains`);
|
||||
}
|
||||
|
||||
// Enrich automation data with service information
|
||||
const enrichedAutomations = automations.map(automation => {
|
||||
const actions = automation.attributes?.action || [];
|
||||
const enrichedActions = actions.map((action: any) => {
|
||||
if (action.service) {
|
||||
const [domain, service] = action.service.split('.');
|
||||
const serviceInfo = availableServices[domain]?.[service];
|
||||
return {
|
||||
...action,
|
||||
service_info: serviceInfo
|
||||
};
|
||||
}
|
||||
return action;
|
||||
});
|
||||
|
||||
return {
|
||||
...automation,
|
||||
config: {
|
||||
id: automation.entity_id.split('.')[1],
|
||||
alias: automation.attributes?.friendly_name,
|
||||
trigger: automation.attributes?.trigger || [],
|
||||
condition: automation.attributes?.condition || [],
|
||||
action: enrichedActions,
|
||||
mode: automation.attributes?.mode || 'single'
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
if (automations.length === 0) {
|
||||
console.log(chalk.bold.underline("\nAutomation Optimization Report"));
|
||||
console.log(chalk.yellow("No automations found in the system. Consider creating some automations to improve your Home Assistant experience."));
|
||||
@@ -679,7 +771,7 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
|
||||
}
|
||||
|
||||
logger.info(`Analyzing ${automations.length} automations...`);
|
||||
const optimizationXml = await analyzeAutomations(automations);
|
||||
const optimizationXml = await analyzeAutomations(enrichedAutomations);
|
||||
|
||||
const parser = new DOMParser();
|
||||
const xmlDoc = parser.parseFromString(optimizationXml, "text/xml");
|
||||
@@ -721,51 +813,85 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
|
||||
}
|
||||
}
|
||||
|
||||
// Add new automation optimization function
|
||||
async function analyzeAutomations(automations: any[]): Promise<string> {
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
// Compress automation data by only including essential fields
|
||||
const compressedAutomations = automations.map(automation => ({
|
||||
id: automation.entity_id,
|
||||
name: automation.attributes?.friendly_name || automation.entity_id,
|
||||
state: automation.state,
|
||||
last_triggered: automation.attributes?.last_triggered,
|
||||
mode: automation.attributes?.mode,
|
||||
trigger_count: automation.attributes?.trigger?.length || 0,
|
||||
action_count: automation.attributes?.action?.length || 0
|
||||
}));
|
||||
// Create a more detailed summary of automations
|
||||
const automationSummary = {
|
||||
total: automations.length,
|
||||
active: automations.filter(a => a.state === 'on').length,
|
||||
by_type: automations.reduce((acc: Record<string, number>, auto) => {
|
||||
const type = auto.attributes?.mode || 'single';
|
||||
acc[type] = (acc[type] || 0) + 1;
|
||||
return acc;
|
||||
}, {}),
|
||||
recently_triggered: automations.filter(a => {
|
||||
const lastTriggered = a.attributes?.last_triggered;
|
||||
if (!lastTriggered) return false;
|
||||
const lastTriggerDate = new Date(lastTriggered);
|
||||
const oneDayAgo = new Date();
|
||||
oneDayAgo.setDate(oneDayAgo.getDate() - 1);
|
||||
return lastTriggerDate > oneDayAgo;
|
||||
}).length,
|
||||
trigger_types: automations.reduce((acc: Record<string, number>, auto) => {
|
||||
const triggers = auto.config?.trigger || [];
|
||||
triggers.forEach((trigger: any) => {
|
||||
const type = trigger.platform || 'unknown';
|
||||
acc[type] = (acc[type] || 0) + 1;
|
||||
});
|
||||
return acc;
|
||||
}, {}),
|
||||
action_types: automations.reduce((acc: Record<string, number>, auto) => {
|
||||
const actions = auto.config?.action || [];
|
||||
actions.forEach((action: any) => {
|
||||
const type = action.service?.split('.')[0] || 'unknown';
|
||||
acc[type] = (acc[type] || 0) + 1;
|
||||
});
|
||||
return acc;
|
||||
}, {}),
|
||||
service_domains: Array.from(new Set(automations.flatMap(auto =>
|
||||
(auto.config?.action || [])
|
||||
.map((action: any) => action.service?.split('.')[0])
|
||||
.filter(Boolean)
|
||||
))).sort(),
|
||||
names: automations.map(a => a.attributes?.friendly_name || a.entity_id.split('.')[1]).slice(0, 10)
|
||||
};
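A minimal standalone version of the trigger-type tally built into automationSummary above; the AutomationLike type below is a simplified stand-in for the Home Assistant state objects, not a type from this changeset.

// Illustrative sketch (not part of the diff): count automation triggers by platform.
type AutomationLike = {
    state: string;
    attributes?: { trigger?: Array<{ platform?: string }> };
};

function countTriggerTypes(automations: AutomationLike[]): Record<string, number> {
    return automations.reduce((acc: Record<string, number>, auto) => {
        for (const trigger of auto.attributes?.trigger ?? []) {
            const type = trigger.platform || 'unknown';
            acc[type] = (acc[type] || 0) + 1;
        }
        return acc;
    }, {});
}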
|
||||
|
||||
const prompt = `Analyze these Home Assistant automations and provide optimization suggestions in XML format:
|
||||
${JSON.stringify(compressedAutomations, null, 2)}
|
||||
${JSON.stringify(automationSummary, null, 2)}
|
||||
|
||||
Key metrics:
|
||||
- Total automations: ${automationSummary.total}
|
||||
- Active automations: ${automationSummary.active}
|
||||
- Recently triggered: ${automationSummary.recently_triggered}
|
||||
- Automation modes: ${JSON.stringify(automationSummary.by_type)}
|
||||
- Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
|
||||
- Action types: ${JSON.stringify(automationSummary.action_types)}
|
||||
- Service domains used: ${automationSummary.service_domains.join(', ')}
|
||||
|
||||
Generate your response in this EXACT format:
|
||||
<analysis>
|
||||
<findings>
|
||||
<item>Finding 1</item>
|
||||
<item>Finding 2</item>
|
||||
<!-- Add more findings as needed -->
|
||||
</findings>
|
||||
<recommendations>
|
||||
<item>Recommendation 1</item>
|
||||
<item>Recommendation 2</item>
|
||||
<!-- Add more recommendations as needed -->
|
||||
</recommendations>
|
||||
<blueprints>
|
||||
<item>Blueprint suggestion 1</item>
|
||||
<item>Blueprint suggestion 2</item>
|
||||
<!-- Add more blueprint suggestions as needed -->
|
||||
</blueprints>
|
||||
</analysis>
|
||||
|
||||
If no optimizations are needed, return empty item lists but maintain the XML structure.
|
||||
|
||||
Focus on:
|
||||
1. Identifying patterns and potential improvements
|
||||
2. Suggesting energy-saving optimizations
|
||||
1. Identifying patterns and potential improvements based on trigger and action types
|
||||
2. Suggesting energy-saving optimizations based on the services being used
|
||||
3. Recommending error handling improvements
|
||||
4. Suggesting relevant blueprints`;
|
||||
4. Suggesting relevant blueprints for common automation patterns
|
||||
5. Analyzing the distribution of automation types and suggesting optimizations`;
|
||||
|
||||
try {
|
||||
const completion = await openai.chat.completions.create({
|
||||
@@ -773,12 +899,12 @@ Focus on:
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a Home Assistant automation expert. Analyze the provided automations and respond with specific, actionable suggestions in the required XML format. If no optimizations are needed, return empty item lists but maintain the XML structure."
|
||||
content: "You are a Home Assistant automation expert. Analyze the provided automation summary and respond with specific, actionable suggestions in the required XML format."
|
||||
},
|
||||
{ role: "user", content: prompt }
|
||||
],
|
||||
temperature: 0.2,
|
||||
max_tokens: Math.min(config.selectedModel.maxTokens, 4000)
|
||||
max_tokens: Math.min(config.selectedModel.maxTokens, 2048)
|
||||
});
|
||||
|
||||
const response = completion.choices[0].message?.content || "";
|
||||
@@ -819,62 +945,164 @@ Focus on:
|
||||
}
|
||||
}
|
||||
|
||||
// Update model selection prompt count dynamically
|
||||
async function selectModel(): Promise<ModelConfig> {
|
||||
console.log(chalk.bold.underline("\nAvailable Models:"));
|
||||
AVAILABLE_MODELS.forEach((model, index) => {
|
||||
console.log(
|
||||
`${index + 1}. ${chalk.blue(model.name.padEnd(20))} ` +
|
||||
`Context: ${chalk.yellow(model.contextWindow.toLocaleString().padStart(6))} tokens | ` +
|
||||
`Max output: ${chalk.green(model.maxTokens.toLocaleString().padStart(5))} tokens`
|
||||
);
|
||||
// Add new handleCustomPrompt function
|
||||
async function handleCustomPrompt(haInfo: any, customPrompt: string): Promise<void> {
|
||||
try {
|
||||
// Add device metadata
|
||||
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
|
||||
const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
|
||||
acc[domain] = (devices as any[]).length;
|
||||
return acc;
|
||||
}, {}) : {};
|
||||
const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
|
||||
|
||||
// Get automation information
|
||||
const automations = haInfo.devices?.automation || [];
|
||||
const automationDetails = automations.map((auto: any) => ({
|
||||
name: auto.attributes?.friendly_name || auto.entity_id.split('.')[1],
|
||||
state: auto.state,
|
||||
last_triggered: auto.attributes?.last_triggered,
|
||||
mode: auto.attributes?.mode,
|
||||
triggers: auto.attributes?.trigger?.map((t: any) => ({
|
||||
platform: t.platform,
|
||||
...t
|
||||
})) || [],
|
||||
conditions: auto.attributes?.condition?.map((c: any) => ({
|
||||
condition: c.condition,
|
||||
...c
|
||||
})) || [],
|
||||
actions: auto.attributes?.action?.map((a: any) => ({
|
||||
service: a.service,
|
||||
...a
|
||||
})) || []
|
||||
}));
|
||||
|
||||
const automationSummary = {
|
||||
total: automations.length,
|
||||
active: automations.filter((a: any) => a.state === 'on').length,
|
||||
trigger_types: automations.reduce((acc: Record<string, number>, auto: any) => {
|
||||
const triggers = auto.attributes?.trigger || [];
|
||||
triggers.forEach((trigger: any) => {
|
||||
const type = trigger.platform || 'unknown';
|
||||
acc[type] = (acc[type] || 0) + 1;
|
||||
});
|
||||
return acc;
|
||||
}, {}),
|
||||
action_types: automations.reduce((acc: Record<string, number>, auto: any) => {
|
||||
const actions = auto.attributes?.action || [];
|
||||
actions.forEach((action: any) => {
|
||||
const type = action.service?.split('.')[0] || 'unknown';
|
||||
acc[type] = (acc[type] || 0) + 1;
|
||||
});
|
||||
return acc;
|
||||
}, {}),
|
||||
service_domains: Array.from(new Set(automations.flatMap((auto: any) =>
|
||||
(auto.attributes?.action || [])
|
||||
.map((action: any) => action.service?.split('.')[0])
|
||||
.filter(Boolean)
|
||||
))).sort()
|
||||
};
|
||||
|
||||
// Create a summary of the devices
|
||||
const deviceSummary = Object.entries(deviceStates)
|
||||
.map(([domain, count]) => `${domain}: ${count}`)
|
||||
.join(', ');
|
||||
|
||||
if (process.env.HA_TEST_MODE === '1') {
|
||||
console.log("\nTest Mode Analysis Results:\n");
|
||||
console.log("Based on your Home Assistant setup with:");
|
||||
console.log(`- ${totalDevices} total devices`);
|
||||
console.log(`- Device types: ${deviceTypes.join(', ')}`);
|
||||
console.log("\nAnalysis for prompt: " + customPrompt);
|
||||
console.log("1. Current State:");
|
||||
console.log(" - All devices are functioning normally");
|
||||
console.log(" - System is responsive and stable");
|
||||
console.log("\n2. Recommendations:");
|
||||
console.log(" - Consider grouping devices by room");
|
||||
console.log(" - Add automation for frequently used devices");
|
||||
console.log(" - Monitor power usage of main appliances");
|
||||
console.log("\n3. Optimization Opportunities:");
|
||||
console.log(" - Create scenes for different times of day");
|
||||
console.log(" - Set up presence detection for automatic control");
|
||||
return;
|
||||
}
|
||||
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: config.selectedModel.name,
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
|
||||
Current system has ${totalDevices} devices across ${deviceTypes.length} types.
|
||||
Device distribution: ${deviceSummary}
|
||||
|
||||
Automation Summary:
|
||||
- Total automations: ${automationSummary.total}
|
||||
- Active automations: ${automationSummary.active}
|
||||
- Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
|
||||
- Action types: ${JSON.stringify(automationSummary.action_types)}
|
||||
- Service domains used: ${automationSummary.service_domains.join(', ')}
|
||||
|
||||
Detailed Automation List:
|
||||
${JSON.stringify(automationDetails, null, 2)}`
|
||||
},
|
||||
{ role: "user", content: customPrompt },
|
||||
],
|
||||
max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
|
||||
temperature: 0.3,
|
||||
});
|
||||
|
||||
const maxOption = AVAILABLE_MODELS.length;
|
||||
const choice = await getUserInput(`\nSelect model (1-${maxOption}): `);
|
||||
const selectedIndex = parseInt(choice) - 1;
|
||||
console.log("\nAnalysis Results:\n");
|
||||
console.log(completion.choices[0].message?.content || "No response generated");
|
||||
|
||||
if (isNaN(selectedIndex) || selectedIndex < 0 || selectedIndex >= AVAILABLE_MODELS.length) {
|
||||
console.log(chalk.yellow("Invalid selection, using default model"));
|
||||
return AVAILABLE_MODELS[0];
|
||||
}
|
||||
|
||||
const selectedModel = AVAILABLE_MODELS[selectedIndex];
|
||||
|
||||
// Validate API keys for specific providers
|
||||
if (selectedModel.name.startsWith('deepseek')) {
|
||||
if (!process.env.DEEPSEEK_API_KEY) {
|
||||
logger.error("DeepSeek models require DEEPSEEK_API_KEY in .env");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Verify DeepSeek connection
|
||||
try {
|
||||
await getOpenAIClient().models.list();
|
||||
} catch (error) {
|
||||
logger.error(`DeepSeek connection failed: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
console.error("Error processing custom prompt:", error);
|
||||
|
||||
if (process.env.HA_TEST_MODE === '1') {
|
||||
console.log("\nTest Mode Fallback Analysis:\n");
|
||||
console.log("1. System Overview:");
|
||||
console.log(" - Basic configuration detected");
|
||||
console.log(" - All core services operational");
|
||||
console.log("\n2. Suggestions:");
|
||||
console.log(" - Review device naming conventions");
|
||||
console.log(" - Consider adding automation blueprints");
|
||||
return;
|
||||
}
|
||||
|
||||
if (selectedModel.name.startsWith('gpt-4-o') && !process.env.OPENAI_API_KEY) {
|
||||
logger.error("OpenAI models require OPENAI_API_KEY in .env");
|
||||
process.exit(1);
|
||||
}
|
||||
// Retry with simplified prompt if there's an error
|
||||
try {
|
||||
const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
|
||||
const openai = getOpenAIClient();
|
||||
const config = loadConfig();
|
||||
|
||||
return selectedModel;
|
||||
const retryCompletion = await openai.chat.completions.create({
|
||||
model: config.selectedModel.name,
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
content: "You are a Home Assistant expert. Provide a simple analysis of the system."
|
||||
},
|
||||
{ role: "user", content: retryPrompt },
|
||||
],
|
||||
max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
|
||||
temperature: 0.3,
|
||||
});
|
||||
|
||||
console.log("\nAnalysis Results:\n");
|
||||
console.log(retryCompletion.choices[0].message?.content || "No response generated");
|
||||
} catch (retryError) {
|
||||
console.error("Error during retry:", retryError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enhanced main function with progress indicators
|
||||
async function main() {
|
||||
let config = loadConfig();
|
||||
|
||||
// Model selection
|
||||
config.selectedModel = await selectModel();
|
||||
logger.info(`Selected model: ${chalk.blue(config.selectedModel.name)} ` +
|
||||
`(Context: ${config.selectedModel.contextWindow.toLocaleString()} tokens, ` +
|
||||
`Output: ${config.selectedModel.maxTokens.toLocaleString()} tokens)`);
|
||||
|
||||
logger.info(`Starting analysis with ${config.selectedModel.name} model...`);
|
||||
|
||||
try {
|
||||
@@ -888,12 +1116,20 @@ async function main() {
|
||||
|
||||
logger.success(`Collected data from ${Object.keys(haInfo.devices).length} device types`);
|
||||
|
||||
const mode = await getUserInput(
|
||||
"\nSelect mode:\n1. Standard Analysis\n2. Custom Prompt\n3. Automation Optimization\nEnter choice (1-3): "
|
||||
);
|
||||
// Get mode from command line argument or default to 1
|
||||
const mode = process.argv[2] || "1";
|
||||
|
||||
console.log("\nAvailable modes:");
|
||||
console.log("1. Standard Analysis");
|
||||
console.log("2. Custom Prompt");
|
||||
console.log("3. Automation Optimization");
|
||||
console.log(`Selected mode: ${mode}\n`);
|
||||
|
||||
if (mode === "2") {
|
||||
await handleCustomPrompt(haInfo);
|
||||
// For custom prompt mode, get the prompt from remaining arguments
|
||||
const customPrompt = process.argv.slice(3).join(" ") || "Analyze my Home Assistant setup";
|
||||
console.log(`Custom prompt: ${customPrompt}\n`);
|
||||
await handleCustomPrompt(haInfo, customPrompt);
|
||||
} else if (mode === "3") {
|
||||
await handleAutomationOptimization(haInfo);
|
||||
} else {
|
||||
@@ -938,22 +1174,39 @@ function getItems(xmlDoc: Document, path: string): string[] {
|
||||
.map(item => (item as Element).textContent || "");
|
||||
}
|
||||
|
||||
// Add environment check for processor type
|
||||
// Replace the Express server initialization at the bottom with Bun's server
|
||||
if (process.env.PROCESSOR_TYPE === 'openai') {
|
||||
// Initialize Express server only for OpenAI
|
||||
const app = express();
|
||||
const port = process.env.PORT || 3000;
|
||||
// Initialize Bun server for OpenAI
|
||||
const server = Bun.serve({
|
||||
port: process.env.PORT || 3000,
|
||||
async fetch(req) {
|
||||
const url = new URL(req.url);
|
||||
|
||||
app.use(bodyParser.json());
|
||||
// Handle chat endpoint
|
||||
if (url.pathname === '/chat' && req.method === 'POST') {
|
||||
try {
|
||||
const body = await req.json();
|
||||
// Handle chat logic here
|
||||
return new Response(JSON.stringify({ success: true }), {
|
||||
headers: { 'Content-Type': 'application/json' }
|
||||
});
|
||||
} catch (error) {
|
||||
return new Response(JSON.stringify({
|
||||
success: false,
|
||||
error: error.message
|
||||
}), {
|
||||
status: 400,
|
||||
headers: { 'Content-Type': 'application/json' }
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Keep existing OpenAI routes
|
||||
app.post('/chat', async (req, res) => {
|
||||
// ... existing OpenAI handler code ...
|
||||
// Handle 404 for unknown routes
|
||||
return new Response('Not Found', { status: 404 });
|
||||
},
|
||||
});
|
||||
|
||||
app.listen(port, () => {
|
||||
console.log(`[OpenAI Server] Running on port ${port}`);
|
||||
});
|
||||
console.log(`[OpenAI Server] Running on port ${server.port}`);
|
||||
} else {
|
||||
console.log('[Claude Mode] Using stdio communication');
|
||||
}
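A hedged usage sketch for the Bun.serve endpoint above: the /chat handler body is elided in this diff ("Handle chat logic here"), so the request payload below is a placeholder; only the { success: true } response shape is taken from the code.

// Illustrative client call (payload shape assumed, not defined by this changeset).
const chatResponse = await fetch('http://localhost:3000/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message: 'Turn off the kitchen light' }),
});
console.log(await chatResponse.json()); // { success: true } on the happy path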
|
||||
|
||||
@@ -1,9 +1,15 @@
|
||||
import { SpeechToText, TranscriptionResult, WakeWordEvent } from '../src/speech/speechToText';
|
||||
import path from 'path';
|
||||
import recorder from 'node-record-lpcm16';
|
||||
import { Writable } from 'stream';
|
||||
|
||||
async function main() {
|
||||
// Initialize the speech-to-text service
|
||||
const speech = new SpeechToText('fast-whisper');
|
||||
const speech = new SpeechToText({
|
||||
modelPath: 'base.en',
|
||||
modelType: 'whisper',
|
||||
containerName: 'fast-whisper'
|
||||
});
|
||||
|
||||
// Check if the service is available
|
||||
const isHealthy = await speech.checkHealth();
|
||||
@@ -45,12 +51,51 @@ async function main() {
|
||||
console.error('❌ Error:', error.message);
|
||||
});
|
||||
|
||||
// Create audio directory if it doesn't exist
|
||||
const audioDir = path.join(__dirname, '..', 'audio');
|
||||
if (!require('fs').existsSync(audioDir)) {
|
||||
require('fs').mkdirSync(audioDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Start microphone recording
|
||||
console.log('Starting microphone recording...');
|
||||
let audioBuffer = Buffer.alloc(0);
|
||||
|
||||
const audioStream = new Writable({
|
||||
write(chunk: Buffer, encoding, callback) {
|
||||
audioBuffer = Buffer.concat([audioBuffer, chunk]);
|
||||
callback();
|
||||
}
|
||||
});
|
||||
|
||||
const recording = recorder.record({
|
||||
sampleRate: 16000,
|
||||
channels: 1,
|
||||
audioType: 'wav'
|
||||
});
|
||||
|
||||
recording.stream().pipe(audioStream);
|
||||
|
||||
// Process audio every 5 seconds
|
||||
setInterval(async () => {
|
||||
if (audioBuffer.length > 0) {
|
||||
try {
|
||||
const result = await speech.transcribe(audioBuffer);
|
||||
console.log('\n🎤 Live transcription:', result);
|
||||
// Reset buffer after processing
|
||||
audioBuffer = Buffer.alloc(0);
|
||||
} catch (error) {
|
||||
console.error('❌ Transcription error:', error);
|
||||
}
|
||||
}
|
||||
}, 5000);
|
||||
|
||||
// Example of manual transcription
|
||||
async function transcribeFile(filepath: string) {
|
||||
try {
|
||||
console.log(`\n🎯 Manually transcribing: ${filepath}`);
|
||||
const result = await speech.transcribeAudio(filepath, {
|
||||
model: 'base.en', // You can change this to tiny.en, small.en, medium.en, or large-v2
|
||||
model: 'base.en',
|
||||
language: 'en',
|
||||
temperature: 0,
|
||||
beamSize: 5
|
||||
@@ -63,22 +108,13 @@ async function main() {
|
||||
}
|
||||
}
|
||||
|
||||
// Create audio directory if it doesn't exist
|
||||
const audioDir = path.join(__dirname, '..', 'audio');
|
||||
if (!require('fs').existsSync(audioDir)) {
|
||||
require('fs').mkdirSync(audioDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Start wake word detection
|
||||
speech.startWakeWordDetection(audioDir);
|
||||
|
||||
// Example: You can also manually transcribe files
|
||||
// Uncomment the following line and replace with your audio file:
|
||||
// await transcribeFile('/path/to/your/audio.wav');
|
||||
|
||||
// Keep the process running
|
||||
// Handle cleanup on exit
|
||||
process.on('SIGINT', () => {
|
||||
console.log('\nStopping speech service...');
|
||||
recording.stop();
|
||||
speech.stopWakeWordDetection();
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
mkdocs.yml (249 lines removed)
@@ -1,249 +0,0 @@
|
||||
site_name: MCP Server for Home Assistant
|
||||
site_url: https://jango-blockchained.github.io/advanced-homeassistant-mcp
|
||||
repo_url: https://github.com/jango-blockchained/advanced-homeassistant-mcp
|
||||
site_description: Home Assistant MCP Server Documentation
|
||||
# Add this to handle GitHub Pages serving from a subdirectory
|
||||
site_dir: site/advanced-homeassistant-mcp
|
||||
|
||||
theme:
|
||||
name: material
|
||||
logo: assets/images/logo.png
|
||||
favicon: assets/images/favicon.ico
|
||||
|
||||
# Modern Features
|
||||
features:
|
||||
# Navigation Enhancements
|
||||
- navigation.tabs
|
||||
- navigation.tabs.sticky
|
||||
- navigation.indexes
|
||||
- navigation.sections
|
||||
- navigation.expand
|
||||
- navigation.path
|
||||
- navigation.footer
|
||||
- navigation.prune
|
||||
- navigation.tracking
|
||||
- navigation.instant
|
||||
|
||||
# UI Elements
|
||||
- header.autohide
|
||||
- toc.integrate
|
||||
- toc.follow
|
||||
- announce.dismiss
|
||||
|
||||
# Search Features
|
||||
- search.suggest
|
||||
- search.highlight
|
||||
- search.share
|
||||
|
||||
# Code Features
|
||||
- content.code.annotate
|
||||
- content.code.copy
|
||||
- content.code.select
|
||||
- content.tabs.link
|
||||
- content.tooltips
|
||||
|
||||
# Theme Configuration
|
||||
palette:
|
||||
# Dark mode as primary
|
||||
- media: "(prefers-color-scheme: dark)"
|
||||
scheme: slate
|
||||
primary: deep-purple
|
||||
accent: purple
|
||||
toggle:
|
||||
icon: material/weather-sunny
|
||||
name: Switch to light mode
|
||||
# Light mode as secondary
|
||||
- media: "(prefers-color-scheme: light)"
|
||||
scheme: default
|
||||
primary: deep-purple
|
||||
accent: purple
|
||||
toggle:
|
||||
icon: material/weather-night
|
||||
name: Switch to dark mode
|
||||
|
||||
font:
|
||||
text: Roboto
|
||||
code: Roboto Mono
|
||||
|
||||
icon:
|
||||
repo: fontawesome/brands/github
|
||||
edit: material/pencil
|
||||
view: material/eye
|
||||
|
||||
markdown_extensions:
|
||||
# Modern Code Highlighting
|
||||
- pymdownx.highlight:
|
||||
anchor_linenums: true
|
||||
line_spans: __span
|
||||
pygments_lang_class: true
|
||||
- pymdownx.inlinehilite
|
||||
- pymdownx.snippets
|
||||
|
||||
# Advanced Formatting
|
||||
- pymdownx.critic
|
||||
- pymdownx.caret
|
||||
- pymdownx.keys
|
||||
- pymdownx.mark
|
||||
- pymdownx.tilde
|
||||
|
||||
# Interactive Elements
|
||||
- pymdownx.details
|
||||
- pymdownx.tabbed:
|
||||
alternate_style: true
|
||||
- pymdownx.tasklist:
|
||||
custom_checkbox: true
|
||||
|
||||
# Diagrams & Formatting
|
||||
- pymdownx.superfences:
|
||||
custom_fences:
|
||||
- name: mermaid
|
||||
class: mermaid
|
||||
format: !!python/name:pymdownx.superfences.fence_code_format
|
||||
- pymdownx.arithmatex:
|
||||
generic: true
|
||||
|
||||
# Additional Extensions
|
||||
- admonition
|
||||
- attr_list
|
||||
- md_in_html
|
||||
- pymdownx.emoji:
|
||||
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||
emoji_generator: !!python/name:material.extensions.emoji.to_svg
|
||||
- footnotes
|
||||
- tables
|
||||
- def_list
|
||||
- abbr
|
||||
|
||||
plugins:
|
||||
# Core Plugins
|
||||
- search:
|
||||
separator: '[\s\-,:!=\[\]()"/]+|(?!\b)(?=[A-Z][a-z])|\.(?!\d)|&[lg]t;'
|
||||
- minify:
|
||||
minify_html: true
|
||||
- mkdocstrings
|
||||
|
||||
# Advanced Features
|
||||
- social:
|
||||
cards: false
|
||||
- tags
|
||||
- offline
|
||||
|
||||
# Version Management
|
||||
- git-revision-date-localized:
|
||||
enable_creation_date: true
|
||||
type: date
|
||||
|
||||
extra:
|
||||
# Consent Management
|
||||
consent:
|
||||
title: Cookie consent
|
||||
description: >-
|
||||
We use cookies to recognize your repeated visits and preferences, as well
|
||||
as to measure the effectiveness of our documentation and whether users
|
||||
find what they're searching for. With your consent, you're helping us to
|
||||
make our documentation better.
|
||||
actions:
|
||||
- accept
|
||||
- reject
|
||||
- manage
|
||||
|
||||
# Version Management
|
||||
version:
|
||||
provider: mike
|
||||
default: latest
|
||||
|
||||
# Social Links
|
||||
social:
|
||||
- icon: fontawesome/brands/github
|
||||
link: https://github.com/jango-blockchained/homeassistant-mcp
|
||||
- icon: fontawesome/brands/docker
|
||||
link: https://hub.docker.com/r/jangoblockchained/homeassistant-mcp
|
||||
|
||||
# Status Indicators
|
||||
status:
|
||||
new: Recently added
|
||||
deprecated: Deprecated
|
||||
beta: Beta
|
||||
|
||||
# Analytics
|
||||
analytics:
|
||||
provider: google
|
||||
property: !ENV GOOGLE_ANALYTICS_KEY
|
||||
feedback:
|
||||
title: Was this page helpful?
|
||||
ratings:
|
||||
- icon: material/emoticon-happy-outline
|
||||
name: This page was helpful
|
||||
data: 1
|
||||
note: >-
|
||||
Thanks for your feedback!
|
||||
- icon: material/emoticon-sad-outline
|
||||
name: This page could be improved
|
||||
data: 0
|
||||
note: >-
|
||||
Thanks for your feedback! Please consider creating an issue to help us improve.
|
||||
|
||||
extra_css:
|
||||
- stylesheets/extra.css
|
||||
|
||||
extra_javascript:
|
||||
- javascripts/mathjax.js
|
||||
- https://polyfill.io/v3/polyfill.min.js?features=es6
|
||||
- https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
|
||||
- javascripts/extra.js
|
||||
|
||||
copyright: Copyright © 2025 jango-blockchained
|
||||
|
||||
# Keep existing nav structure
|
||||
nav:
|
||||
- Home: index.md
|
||||
- Getting Started:
|
||||
- Overview: getting-started/index.md
|
||||
- Installation: getting-started/installation.md
|
||||
- Quick Start: getting-started/quickstart.md
|
||||
- Configuration: getting-started/configuration.md
|
||||
- Docker Setup: getting-started/docker.md
|
||||
- API Reference:
|
||||
- Overview: api/index.md
|
||||
- Core API: api/core.md
|
||||
- SSE API: api/sse.md
|
||||
- API Documentation: api.md
|
||||
- Usage: usage.md
|
||||
- Configuration:
|
||||
- Overview: config/index.md
|
||||
- System Configuration: configuration.md
|
||||
- Security: security.md
|
||||
- Tools:
|
||||
- Overview: tools/index.md
|
||||
- Device Management:
|
||||
- List Devices: tools/device-management/list-devices.md
|
||||
- Device Control: tools/device-management/control.md
|
||||
- History & State:
|
||||
- History: tools/history-state/history.md
|
||||
- Scene Management: tools/history-state/scene.md
|
||||
- Automation:
|
||||
- Automation Management: tools/automation/automation.md
|
||||
- Automation Configuration: tools/automation/automation-config.md
|
||||
- Add-ons & Packages:
|
||||
- Add-on Management: tools/addons-packages/addon.md
|
||||
- Package Management: tools/addons-packages/package.md
|
||||
- Notifications:
|
||||
- Notify: tools/notifications/notify.md
|
||||
- Events:
|
||||
- Event Subscription: tools/events/subscribe-events.md
|
||||
- SSE Statistics: tools/events/sse-stats.md
|
||||
- Development:
|
||||
- Overview: development/index.md
|
||||
- Environment Setup: development/environment.md
|
||||
- Architecture: architecture.md
|
||||
- Contributing: contributing.md
|
||||
- Testing: testing.md
|
||||
- Best Practices: development/best-practices.md
|
||||
- Interfaces: development/interfaces.md
|
||||
- Tool Development: development/tools.md
|
||||
- Test Migration Guide: development/test-migration-guide.md
|
||||
- Troubleshooting: troubleshooting.md
|
||||
- Deployment: deployment.md
|
||||
- Roadmap: roadmap.md
|
||||
- Examples:
|
||||
- Overview: examples/index.md
|
||||
package.json (14 lines changed)
@@ -7,7 +7,7 @@
|
||||
"scripts": {
|
||||
"start": "bun run dist/index.js",
|
||||
"dev": "bun --hot --watch src/index.ts",
|
||||
"build": "bun build ./src/index.ts --outdir ./dist --target node --minify",
|
||||
"build": "bun build ./src/index.ts --outdir ./dist --target bun --minify",
|
||||
"test": "bun test",
|
||||
"test:watch": "bun test --watch",
|
||||
"test:coverage": "bun test --coverage",
|
||||
@@ -21,7 +21,7 @@
|
||||
"profile": "bun --inspect src/index.ts",
|
||||
"clean": "rm -rf dist .bun coverage",
|
||||
"typecheck": "bun x tsc --noEmit",
|
||||
"example:speech": "bun run examples/speech-to-text-example.ts"
|
||||
"example:speech": "bun run extra/speech-to-text-example.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@elysiajs/cors": "^1.2.0",
|
||||
@@ -31,12 +31,14 @@
|
||||
"@types/sanitize-html": "^2.9.5",
|
||||
"@types/ws": "^8.5.10",
|
||||
"@xmldom/xmldom": "^0.9.7",
|
||||
"dotenv": "^16.4.5",
|
||||
"chalk": "^5.4.1",
|
||||
"dotenv": "^16.4.7",
|
||||
"elysia": "^1.2.11",
|
||||
"helmet": "^7.1.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"node-fetch": "^3.3.2",
|
||||
"openai": "^4.82.0",
|
||||
"node-record-lpcm16": "^1.0.1",
|
||||
"openai": "^4.83.0",
|
||||
"sanitize-html": "^2.11.0",
|
||||
"typescript": "^5.3.3",
|
||||
"winston": "^3.11.0",
|
||||
@@ -45,6 +47,10 @@
|
||||
"zod": "^3.22.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@jest/globals": "^29.7.0",
|
||||
"@types/bun": "latest",
|
||||
"@types/express": "^5.0.0",
|
||||
"@types/jest": "^29.5.14",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^7.1.0",
|
||||
"@typescript-eslint/parser": "^7.1.0",
|
||||
|
||||
scripts/setup-env.sh (new executable file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Function to print colored messages
|
||||
print_message() {
|
||||
local color=$1
|
||||
local message=$2
|
||||
echo -e "${color}${message}${NC}"
|
||||
}
|
||||
|
||||
# Function to check if a file exists
|
||||
check_file() {
|
||||
if [ -f "$1" ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to copy environment file
|
||||
copy_env_file() {
|
||||
local source=$1
|
||||
local target=$2
|
||||
if [ -f "$target" ]; then
|
||||
print_message "$YELLOW" "Warning: $target already exists. Skipping..."
|
||||
else
|
||||
cp "$source" "$target"
|
||||
if [ $? -eq 0 ]; then
|
||||
print_message "$GREEN" "Created $target successfully"
|
||||
else
|
||||
print_message "$RED" "Error: Failed to create $target"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Main script
|
||||
print_message "$GREEN" "Setting up environment files..."
|
||||
|
||||
# Check if .env.example exists
|
||||
if ! check_file ".env.example"; then
|
||||
print_message "$RED" "Error: .env.example not found!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Setup base environment file
|
||||
if [ "$1" = "--force" ]; then
|
||||
cp .env.example .env
|
||||
print_message "$GREEN" "Forced creation of .env file"
|
||||
else
|
||||
copy_env_file ".env.example" ".env"
|
||||
fi
|
||||
|
||||
# Determine environment
|
||||
ENV=${NODE_ENV:-development}
|
||||
case "$ENV" in
|
||||
"development"|"dev")
|
||||
ENV_FILE=".env.dev"
|
||||
;;
|
||||
"production"|"prod")
|
||||
ENV_FILE=".env.prod"
|
||||
;;
|
||||
"test")
|
||||
ENV_FILE=".env.test"
|
||||
;;
|
||||
*)
|
||||
print_message "$RED" "Error: Invalid environment: $ENV"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Copy environment-specific file
|
||||
if [ -f "$ENV_FILE" ]; then
|
||||
if [ "$1" = "--force" ]; then
|
||||
cp "$ENV_FILE" .env
|
||||
print_message "$GREEN" "Forced override of .env with $ENV_FILE"
|
||||
else
|
||||
print_message "$YELLOW" "Do you want to override .env with $ENV_FILE? [y/N] "
|
||||
read -r response
|
||||
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
cp "$ENV_FILE" .env
|
||||
print_message "$GREEN" "Copied $ENV_FILE to .env"
|
||||
else
|
||||
print_message "$YELLOW" "Keeping existing .env file"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
print_message "$YELLOW" "Warning: $ENV_FILE not found. Using default .env"
|
||||
fi
|
||||
|
||||
print_message "$GREEN" "Environment setup complete!"
|
||||
print_message "$YELLOW" "Remember to set your HASS_TOKEN in .env"
|
||||
scripts/setup.sh (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copy template if .env doesn't exist
|
||||
if [ ! -f .env ]; then
|
||||
cp .env.example .env
|
||||
echo "Created .env file from template. Please update your credentials!"
|
||||
fi
|
||||
|
||||
# Validate required variables
|
||||
required_vars=("HASS_HOST" "HASS_TOKEN")
|
||||
missing_vars=()
|
||||
|
||||
for var in "${required_vars[@]}"; do
|
||||
if ! grep -q "^$var=" .env; then
|
||||
missing_vars+=("$var")
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#missing_vars[@]} -ne 0 ]; then
|
||||
echo "ERROR: Missing required variables in .env:"
|
||||
printf '%s\n' "${missing_vars[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check Docker version compatibility
|
||||
docker_version=$(docker --version | awk '{print $3}' | cut -d',' -f1)
|
||||
if [ "$(printf '%s\n' "20.10.0" "$docker_version" | sort -V | head -n1)" != "20.10.0" ]; then
|
||||
echo "ERROR: Docker version 20.10.0 or higher required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Environment validation successful"
|
||||
@@ -115,7 +115,7 @@ router.get("/subscribe_events", middleware.wsRateLimiter, (req, res) => {
    res.writeHead(200, {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
        "Connection": "keep-alive",
        "Access-Control-Allow-Origin": "*",
    });

@@ -1,23 +1,5 @@
|
||||
import { config } from "dotenv";
|
||||
import { resolve } from "path";
|
||||
import { z } from "zod";
|
||||
|
||||
/**
|
||||
* Load environment variables based on NODE_ENV
|
||||
* Development: .env.development
|
||||
* Test: .env.test
|
||||
* Production: .env
|
||||
*/
|
||||
const envFile =
|
||||
process.env.NODE_ENV === "production"
|
||||
? ".env"
|
||||
: process.env.NODE_ENV === "test"
|
||||
? ".env.test"
|
||||
: ".env.development";
|
||||
|
||||
console.log(`Loading environment from ${envFile}`);
|
||||
config({ path: resolve(process.cwd(), envFile) });
|
||||
|
||||
/**
|
||||
* Application configuration object
|
||||
* Contains all configuration settings for the application
|
||||
@@ -30,7 +12,7 @@ export const AppConfigSchema = z.object({
|
||||
.default("development"),
|
||||
|
||||
/** Home Assistant Configuration */
|
||||
HASS_HOST: z.string().default("http://192.168.178.63:8123"),
|
||||
HASS_HOST: z.string().default("http://homeassistant.local:8123"),
|
||||
HASS_TOKEN: z.string().optional(),
|
||||
|
||||
/** Speech Features Configuration */
|
||||
@@ -49,7 +31,7 @@ export const AppConfigSchema = z.object({
|
||||
}),
|
||||
|
||||
/** Security Configuration */
|
||||
JWT_SECRET: z.string().default("your-secret-key"),
|
||||
JWT_SECRET: z.string().default("your-secret-key-must-be-32-char-min"),
|
||||
RATE_LIMIT: z.object({
|
||||
/** Time window for rate limiting in milliseconds */
|
||||
windowMs: z.number().default(15 * 60 * 1000), // 15 minutes
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
export const BOILERPLATE_CONFIG = {
|
||||
configuration: {
|
||||
LOG_LEVEL: {
|
||||
type: "string" as const,
|
||||
default: "debug",
|
||||
description: "Logging level",
|
||||
enum: ["error", "warn", "info", "debug", "trace"],
|
||||
},
|
||||
CACHE_DIRECTORY: {
|
||||
type: "string" as const,
|
||||
default: ".cache",
|
||||
description: "Directory for cache files",
|
||||
},
|
||||
CONFIG_DIRECTORY: {
|
||||
type: "string" as const,
|
||||
default: ".config",
|
||||
description: "Directory for configuration files",
|
||||
},
|
||||
DATA_DIRECTORY: {
|
||||
type: "string" as const,
|
||||
default: ".data",
|
||||
description: "Directory for data files",
|
||||
},
|
||||
},
|
||||
internal: {
|
||||
boilerplate: {
|
||||
configuration: {
|
||||
LOG_LEVEL: "debug",
|
||||
CACHE_DIRECTORY: ".cache",
|
||||
CONFIG_DIRECTORY: ".config",
|
||||
DATA_DIRECTORY: ".data",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -11,6 +11,7 @@ const envFile =

config({ path: resolve(process.cwd(), envFile) });

// Base configuration for Home Assistant
export const HASS_CONFIG = {
    // Base configuration
    BASE_URL: process.env.HASS_HOST || "http://localhost:8123",

@@ -1,16 +1,7 @@
|
||||
import { config } from "dotenv";
|
||||
import { resolve } from "path";
|
||||
import { loadEnvironmentVariables } from "./loadEnv";
|
||||
|
||||
// Load environment variables based on NODE_ENV
|
||||
const envFile =
|
||||
process.env.NODE_ENV === "production"
|
||||
? ".env"
|
||||
: process.env.NODE_ENV === "test"
|
||||
? ".env.test"
|
||||
: ".env.development";
|
||||
|
||||
console.log(`Loading environment from ${envFile}`);
|
||||
config({ path: resolve(process.cwd(), envFile) });
|
||||
// Load environment variables from the appropriate files
|
||||
loadEnvironmentVariables();
|
||||
|
||||
// Home Assistant Configuration
|
||||
export const HASS_CONFIG = {
|
||||
|
||||
src/config/loadEnv.ts (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
import { config as dotenvConfig } from "dotenv";
|
||||
import { file } from "bun";
|
||||
import path from "path";
|
||||
|
||||
/**
|
||||
* Maps NODE_ENV values to their corresponding environment file names
|
||||
*/
|
||||
const ENV_FILE_MAPPING: Record<string, string> = {
|
||||
production: ".env.prod",
|
||||
development: ".env.dev",
|
||||
test: ".env.test",
|
||||
};
|
||||
|
||||
/**
|
||||
* Loads environment variables from the appropriate files based on NODE_ENV.
|
||||
* First loads environment-specific file, then overrides with generic .env if it exists.
|
||||
*/
|
||||
export async function loadEnvironmentVariables() {
|
||||
// Determine the current environment (default to 'development')
|
||||
const nodeEnv = (process.env.NODE_ENV || "development").toLowerCase();
|
||||
|
||||
// Get the environment-specific file name
|
||||
const envSpecificFile = ENV_FILE_MAPPING[nodeEnv];
|
||||
if (!envSpecificFile) {
|
||||
console.warn(`Unknown NODE_ENV value: ${nodeEnv}. Using .env.dev as fallback.`);
|
||||
}
|
||||
|
||||
const envFile = envSpecificFile || ".env.dev";
|
||||
const envPath = path.resolve(process.cwd(), envFile);
|
||||
|
||||
// Load the environment-specific file if it exists
|
||||
try {
|
||||
const envFileExists = await file(envPath).exists();
|
||||
if (envFileExists) {
|
||||
dotenvConfig({ path: envPath });
|
||||
console.log(`Loaded environment variables from ${envFile}`);
|
||||
} else {
|
||||
console.warn(`Environment-specific file ${envFile} not found.`);
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(`Error checking environment file ${envFile}:`, error);
|
||||
}
|
||||
|
||||
// Finally, check if there is a generic .env file present
|
||||
// If so, load it with the override option, so its values take precedence
|
||||
const genericEnvPath = path.resolve(process.cwd(), ".env");
|
||||
try {
|
||||
const genericEnvExists = await file(genericEnvPath).exists();
|
||||
if (genericEnvExists) {
|
||||
dotenvConfig({ path: genericEnvPath, override: true });
|
||||
console.log("Loaded and overrode with generic .env file");
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn(`Error checking generic .env file:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
// Export the environment file mapping for reference
|
||||
export const ENV_FILES = ENV_FILE_MAPPING;
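Since loadEnvironmentVariables awaits Bun's file(...).exists() checks, callers need to await it before reading process.env. A minimal usage sketch, assuming the relative import path shown in src/index.ts:

// Illustrative usage (import path as used by src/index.ts in this changeset).
import { loadEnvironmentVariables } from "./config/loadEnv.js";

await loadEnvironmentVariables();
// The environment-specific file (.env.dev / .env.prod / .env.test) is loaded first,
// then a generic .env, if present, overrides it.
console.log(process.env.HASS_HOST);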
|
||||
src/hass/types.ts (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
import type { WebSocket } from 'ws';
|
||||
|
||||
export interface HassInstanceImpl {
|
||||
baseUrl: string;
|
||||
token: string;
|
||||
connect(): Promise<void>;
|
||||
disconnect(): Promise<void>;
|
||||
getStates(): Promise<any[]>;
|
||||
callService(domain: string, service: string, data?: any): Promise<void>;
|
||||
fetchStates(): Promise<any[]>;
|
||||
fetchState(entityId: string): Promise<any>;
|
||||
subscribeEvents(callback: (event: any) => void, eventType?: string): Promise<number>;
|
||||
unsubscribeEvents(subscriptionId: number): Promise<void>;
|
||||
}
|
||||
|
||||
export interface HassWebSocketClient {
|
||||
url: string;
|
||||
token: string;
|
||||
socket: WebSocket | null;
|
||||
connect(): Promise<void>;
|
||||
disconnect(): Promise<void>;
|
||||
send(message: any): Promise<void>;
|
||||
subscribe(callback: (data: any) => void): () => void;
|
||||
}
|
||||
|
||||
export interface HassState {
|
||||
entity_id: string;
|
||||
state: string;
|
||||
attributes: Record<string, any>;
|
||||
last_changed: string;
|
||||
last_updated: string;
|
||||
context: {
|
||||
id: string;
|
||||
parent_id: string | null;
|
||||
user_id: string | null;
|
||||
};
|
||||
}
|
||||
|
||||
export interface HassServiceCall {
|
||||
domain: string;
|
||||
service: string;
|
||||
target?: {
|
||||
entity_id?: string | string[];
|
||||
device_id?: string | string[];
|
||||
area_id?: string | string[];
|
||||
};
|
||||
service_data?: Record<string, any>;
|
||||
}
|
||||
|
||||
export interface HassEvent {
|
||||
event_type: string;
|
||||
data: any;
|
||||
origin: string;
|
||||
time_fired: string;
|
||||
context: {
|
||||
id: string;
|
||||
parent_id: string | null;
|
||||
user_id: string | null;
|
||||
};
|
||||
}
|
||||
|
||||
export type MockFunction<T extends (...args: any[]) => any> = {
|
||||
(...args: Parameters<T>): ReturnType<T>;
|
||||
mock: {
|
||||
calls: Parameters<T>[];
|
||||
results: { type: 'return' | 'throw'; value: any }[];
|
||||
instances: any[];
|
||||
mockImplementation(fn: T): MockFunction<T>;
|
||||
mockReturnValue(value: ReturnType<T>): MockFunction<T>;
|
||||
mockResolvedValue(value: Awaited<ReturnType<T>>): MockFunction<T>;
|
||||
mockRejectedValue(value: any): MockFunction<T>;
|
||||
mockReset(): void;
|
||||
};
|
||||
};
|
||||
src/index.ts (30 lines changed)
@@ -1,6 +1,4 @@
|
||||
import "./polyfills.js";
import { config } from "dotenv";
import { resolve } from "path";
import { file } from "bun";
import { Elysia } from "elysia";
import { cors } from "@elysiajs/cors";
import { swagger } from "@elysiajs/swagger";
@@ -27,17 +25,11 @@ import {
} from "./commands.js";
import { speechService } from "./speech/index.js";
import { APP_CONFIG } from "./config/app.config.js";
import { loadEnvironmentVariables } from "./config/loadEnv.js";
import { MCP_SCHEMA } from "./mcp/schema.js";

// Load environment variables based on NODE_ENV
const envFile =
  process.env.NODE_ENV === "production"
    ? ".env"
    : process.env.NODE_ENV === "test"
      ? ".env.test"
      : ".env.development";

console.log(`Loading environment from ${envFile}`);
config({ path: resolve(process.cwd(), envFile) });
await loadEnvironmentVariables();

// Configuration
const HASS_TOKEN = process.env.HASS_TOKEN;
@@ -126,6 +118,20 @@ const app = new Elysia()
  .use(sanitizeInput)
  .use(errorHandler);

// Mount API routes
app.get("/api/mcp", () => MCP_SCHEMA);
app.post("/api/mcp/execute", async ({ body }: { body: { tool: string; parameters: Record<string, unknown> } }) => {
  const { tool: toolName, parameters } = body;
  const tool = tools.find((t) => t.name === toolName);
  if (!tool) {
    return {
      success: false,
      message: `Tool '${toolName}' not found`,
    };
  }
  return await tool.execute(parameters);
});

// Health check endpoint
app.get("/health", () => ({
  status: "ok",
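For reference, the execute route added above could be exercised with a plain fetch call; the tool name and parameters below are placeholders rather than values taken from this diff, and the URL assumes the default port 3000:

// Hypothetical client call against the /api/mcp/execute route.
const res = await fetch("http://localhost:3000/api/mcp/execute", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ tool: "some_tool", parameters: {} }),
});
// Either the tool's own result or { success: false, message: "Tool 'some_tool' not found" }.
console.log(await res.json());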
@@ -1,292 +1,93 @@
import { JSONSchemaType } from "ajv";
import { Entity, StateChangedEvent } from "../types/hass.js";
import { z } from 'zod';

// Define base types for automation components
type TriggerType = {
  platform: string;
  event?: string | null;
  entity_id?: string | null;
  to?: string | null;
  from?: string | null;
  offset?: string | null;
  [key: string]: any;
// Entity Schema
const entitySchema = z.object({
  entity_id: z.string().regex(/^[a-z0-9_]+\.[a-z0-9_]+$/),
  state: z.string(),
  attributes: z.record(z.any()),
  last_changed: z.string(),
  last_updated: z.string(),
  context: z.object({
    id: z.string(),
    parent_id: z.string().nullable(),
    user_id: z.string().nullable()
  })
});
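// Sketch (not part of the commit): with the regex above, an entity_id such as
// "light.kitchen" passes entitySchema.safeParse, while "kitchen" or "Light.Kitchen"
// fails, since the pattern requires lowercase domain and object id joined by a dot.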
// Service Schema
const serviceSchema = z.object({
  domain: z.string().min(1),
  service: z.string().min(1),
  target: z.object({
    entity_id: z.union([z.string(), z.array(z.string())]),
    device_id: z.union([z.string(), z.array(z.string())]).optional(),
    area_id: z.union([z.string(), z.array(z.string())]).optional()
  }).optional(),
  service_data: z.record(z.any()).optional()
});

// State Changed Event Schema
const stateChangedEventSchema = z.object({
  event_type: z.literal('state_changed'),
  data: z.object({
    entity_id: z.string(),
    old_state: z.union([entitySchema, z.null()]),
    new_state: entitySchema
  }),
  origin: z.string(),
  time_fired: z.string(),
  context: z.object({
    id: z.string(),
    parent_id: z.string().nullable(),
    user_id: z.string().nullable()
  })
});

// Config Schema
const configSchema = z.object({
  location_name: z.string(),
  time_zone: z.string(),
  components: z.array(z.string()),
  version: z.string()
});

// Device Control Schema
const deviceControlSchema = z.object({
  domain: z.string().min(1),
  command: z.string().min(1),
  entity_id: z.union([z.string(), z.array(z.string())]),
  parameters: z.record(z.any()).optional()
}).refine(data => {
  if (typeof data.entity_id === 'string') {
    return data.entity_id.startsWith(data.domain + '.');
  }
  return data.entity_id.every(id => id.startsWith(data.domain + '.'));
}, {
  message: 'entity_id must match the domain'
});

// Validation functions
export const validateEntity = (data: unknown) => {
  const result = entitySchema.safeParse(data);
  return { success: result.success, error: result.success ? undefined : result.error };
};

type ConditionType = {
  condition: string;
  conditions?: Array<Record<string, any>> | null;
  [key: string]: any;
export const validateService = (data: unknown) => {
  const result = serviceSchema.safeParse(data);
  return { success: result.success, error: result.success ? undefined : result.error };
};

type ActionType = {
  service: string;
  target?: {
    entity_id?: string | string[] | null;
    [key: string]: any;
  } | null;
  data?: Record<string, any> | null;
  [key: string]: any;
export const validateStateChangedEvent = (data: unknown) => {
  const result = stateChangedEventSchema.safeParse(data);
  return { success: result.success, error: result.success ? undefined : result.error };
};

type AutomationType = {
  alias: string;
  description?: string | null;
  mode?: ("single" | "parallel" | "queued" | "restart") | null;
  trigger: TriggerType[];
  condition?: ConditionType[] | null;
  action: ActionType[];
export const validateConfig = (data: unknown) => {
  const result = configSchema.safeParse(data);
  return { success: result.success, error: result.success ? undefined : result.error };
};

type DeviceControlType = {
  domain:
    | "light"
    | "switch"
    | "climate"
    | "cover"
    | "fan"
    | "scene"
    | "script"
    | "media_player";
  command: string;
  entity_id: string | string[];
  parameters?: Record<string, any> | null;
export const validateDeviceControl = (data: unknown) => {
  const result = deviceControlSchema.safeParse(data);
  return { success: result.success, error: result.success ? undefined : result.error };
};

// Define missing types
export interface Service {
  name: string;
  description: string;
  target?: {
    entity?: string[];
    device?: string[];
    area?: string[];
  } | null;
  fields: Record<string, any>;
}

export interface Config {
  components: string[];
  config_dir: string;
  elevation: number;
  latitude: number;
  longitude: number;
  location_name: string;
  time_zone: string;
  unit_system: {
    length: string;
    mass: string;
    temperature: string;
    volume: string;
  };
  version: string;
}

// Define base schemas
const contextSchema = {
  type: "object",
  properties: {
    id: { type: "string" },
    parent_id: { type: "string", nullable: true },
    user_id: { type: "string", nullable: true },
  },
  required: ["id", "parent_id", "user_id"],
  additionalProperties: false,
} as const;

// Entity schema
export const entitySchema = {
  type: "object",
  properties: {
    entity_id: { type: "string" },
    state: { type: "string" },
    attributes: {
      type: "object",
      additionalProperties: true,
    },
    last_changed: { type: "string" },
    last_updated: { type: "string" },
    context: contextSchema,
  },
  required: [
    "entity_id",
    "state",
    "attributes",
    "last_changed",
    "last_updated",
    "context",
  ],
  additionalProperties: false,
} as const;

// Service schema
export const serviceSchema = {
  type: "object",
  properties: {
    name: { type: "string" },
    description: { type: "string" },
    target: {
      type: "object",
      nullable: true,
      properties: {
        entity: { type: "array", items: { type: "string" }, nullable: true },
        device: { type: "array", items: { type: "string" }, nullable: true },
        area: { type: "array", items: { type: "string" }, nullable: true },
      },
      required: [],
      additionalProperties: false,
    },
    fields: {
      type: "object",
      additionalProperties: true,
    },
  },
  required: ["name", "description", "fields"],
  additionalProperties: false,
} as const;

// Define the trigger schema without type assertion
export const triggerSchema = {
  type: "object",
  properties: {
    platform: { type: "string" },
    event: { type: "string", nullable: true },
    entity_id: { type: "string", nullable: true },
    to: { type: "string", nullable: true },
    from: { type: "string", nullable: true },
    offset: { type: "string", nullable: true },
  },
  required: ["platform"],
  additionalProperties: true,
};

// Define the automation schema
export const automationSchema = {
  type: "object",
  properties: {
    alias: { type: "string" },
    description: { type: "string", nullable: true },
    mode: {
      type: "string",
      enum: ["single", "parallel", "queued", "restart"],
      nullable: true,
    },
    trigger: {
      type: "array",
      items: triggerSchema,
    },
    condition: {
      type: "array",
      items: {
        type: "object",
        additionalProperties: true,
      },
      nullable: true,
    },
    action: {
      type: "array",
      items: {
        type: "object",
        additionalProperties: true,
      },
    },
  },
  required: ["alias", "trigger", "action"],
  additionalProperties: false,
};

export const deviceControlSchema: JSONSchemaType<DeviceControlType> = {
  type: "object",
  properties: {
    domain: {
      type: "string",
      enum: [
        "light",
        "switch",
        "climate",
        "cover",
        "fan",
        "scene",
        "script",
        "media_player",
      ],
    },
    command: { type: "string" },
    entity_id: {
      anyOf: [
        { type: "string" },
        {
          type: "array",
          items: { type: "string" },
        },
      ],
    },
    parameters: {
      type: "object",
      nullable: true,
      additionalProperties: true,
    },
  },
  required: ["domain", "command", "entity_id"],
  additionalProperties: false,
};

// State changed event schema
export const stateChangedEventSchema = {
  type: "object",
  properties: {
    event_type: { type: "string", const: "state_changed" },
    data: {
      type: "object",
      properties: {
        entity_id: { type: "string" },
        new_state: { ...entitySchema, nullable: true },
        old_state: { ...entitySchema, nullable: true },
      },
      required: ["entity_id", "new_state", "old_state"],
      additionalProperties: false,
    },
    origin: { type: "string" },
    time_fired: { type: "string" },
    context: contextSchema,
  },
  required: ["event_type", "data", "origin", "time_fired", "context"],
  additionalProperties: false,
} as const;

// Config schema
export const configSchema = {
  type: "object",
  properties: {
    components: { type: "array", items: { type: "string" } },
    config_dir: { type: "string" },
    elevation: { type: "number" },
    latitude: { type: "number" },
    longitude: { type: "number" },
    location_name: { type: "string" },
    time_zone: { type: "string" },
    unit_system: {
      type: "object",
      properties: {
        length: { type: "string" },
        mass: { type: "string" },
        temperature: { type: "string" },
        volume: { type: "string" },
      },
      required: ["length", "mass", "temperature", "volume"],
      additionalProperties: false,
    },
    version: { type: "string" },
  },
  required: [
    "components",
    "config_dir",
    "elevation",
    "latitude",
    "longitude",
    "location_name",
    "time_zone",
    "unit_system",
    "version",
  ],
  additionalProperties: false,
} as const;
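To make the refine rule on the zod deviceControlSchema concrete, a hypothetical use of validateDeviceControl (payloads invented, function assumed to be imported from this module):

// Passes: every entity_id starts with the declared domain.
validateDeviceControl({
  domain: "light",
  command: "turn_on",
  entity_id: ["light.kitchen", "light.hallway"],
  parameters: { brightness: 128 },
}); // -> { success: true, error: undefined }

// Fails the refine check: "switch.fan" does not start with "light.".
validateDeviceControl({
  domain: "light",
  command: "turn_on",
  entity_id: "switch.fan",
}); // -> { success: false, error: ZodError with "entity_id must match the domain" }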
@@ -1 +0,0 @@
test audio content
@@ -21,20 +21,72 @@ export const listDevicesTool: Tool = {
      }

      const states = (await response.json()) as HassState[];
      const devices: Record<string, HassState[]> = {
        light: states.filter(state => state.entity_id.startsWith('light.')),
        climate: states.filter(state => state.entity_id.startsWith('climate.'))
      const devices: Record<string, HassState[]> = {};

      // Group devices by domain
      states.forEach(state => {
        const [domain] = state.entity_id.split('.');
        if (!devices[domain]) {
          devices[domain] = [];
        }
        devices[domain].push(state);
      });

      // Calculate device statistics
      const deviceStats = Object.entries(devices).map(([domain, entities]) => {
        const activeStates = ['on', 'home', 'unlocked', 'open'];
        const active = entities.filter(e => activeStates.includes(e.state)).length;
        const uniqueStates = [...new Set(entities.map(e => e.state))];

        return {
          domain,
          count: entities.length,
          active,
          inactive: entities.length - active,
          states: uniqueStates,
          sample: entities.slice(0, 2).map(e => ({
            id: e.entity_id,
            state: e.state,
            name: e.attributes?.friendly_name || e.entity_id
          }))
        };
      });

      const totalDevices = states.length;
      const deviceTypes = Object.keys(devices);

      const deviceSummary = {
        total_devices: totalDevices,
        device_types: deviceTypes,
        by_domain: Object.fromEntries(
          deviceStats.map(stat => [
            stat.domain,
            {
              count: stat.count,
              active: stat.active,
              states: stat.states,
              sample: stat.sample
            }
          ])
        )
      };

      return {
        success: true,
        devices,
        device_summary: deviceSummary
      };
    } catch (error) {
      console.error('Error in list devices tool:', error);
      return {
        success: false,
        message:
          error instanceof Error ? error.message : "Unknown error occurred",
        message: error instanceof Error ? error.message : "Unknown error occurred",
        devices: {},
        device_summary: {
          total_devices: 0,
          device_types: [],
          by_domain: {}
        }
      };
    }
  },
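The grouping and statistics above yield a device_summary shaped roughly like the following; the domains, counts and names are invented for illustration:

const exampleSummary = {
  total_devices: 42,
  device_types: ["light", "climate", "sensor"],
  by_domain: {
    light: {
      count: 12,
      active: 3,                 // states counted as active: on / home / unlocked / open
      states: ["on", "off"],
      sample: [
        { id: "light.kitchen", state: "on", name: "Kitchen" },
        { id: "light.hallway", state: "off", name: "Hallway" },
      ],
    },
  },
};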
22 src/types/node-record-lpcm16.d.ts vendored Normal file
@@ -0,0 +1,22 @@
declare module 'node-record-lpcm16' {
  import { Readable } from 'stream';

  interface RecordOptions {
    sampleRate?: number;
    channels?: number;
    audioType?: string;
    threshold?: number;
    thresholdStart?: number;
    thresholdEnd?: number;
    silence?: number;
    verbose?: boolean;
    recordProgram?: string;
  }

  interface Recording {
    stream(): Readable;
    stop(): void;
  }

  export function record(options?: RecordOptions): Recording;
}
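Going by this declaration, consuming code would look roughly like the sketch below; the option values and the recordProgram choice are assumptions, not taken from the repository:

import { record } from "node-record-lpcm16";

const recording = record({ sampleRate: 16000, channels: 1, recordProgram: "sox" });
recording.stream().on("data", (chunk: Buffer) => {
  // e.g. feed the PCM chunk to the speech service or write it to disk
});
// ...later
recording.stop();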
@@ -1,183 +1,259 @@
import WebSocket from "ws";
import { EventEmitter } from "events";

interface HassMessage {
  type: string;
  id?: number;
  [key: string]: any;
}

interface HassAuthMessage extends HassMessage {
  type: "auth";
  access_token: string;
}

interface HassEventMessage extends HassMessage {
  type: "event";
  event: {
    event_type: string;
    data: any;
  };
}

interface HassSubscribeMessage extends HassMessage {
  type: "subscribe_events";
  event_type?: string;
}

interface HassUnsubscribeMessage extends HassMessage {
  type: "unsubscribe_events";
  subscription: number;
}

interface HassResultMessage extends HassMessage {
  type: "result";
  success: boolean;
  error?: string;
}

export class HassWebSocketClient extends EventEmitter {
  private ws: WebSocket | null = null;
  private messageId = 1;
  private authenticated = false;
  private messageId = 1;
  private subscriptions = new Map<number, (data: any) => void>();
  private url: string;
  private token: string;
  private reconnectAttempts = 0;
  private maxReconnectAttempts = 5;
  private reconnectDelay = 1000;
  private subscriptions = new Map<string, (data: any) => void>();
  private maxReconnectAttempts = 3;

  constructor(
    private url: string,
    private token: string,
    private options: {
      autoReconnect?: boolean;
      maxReconnectAttempts?: number;
      reconnectDelay?: number;
    } = {},
  ) {
  constructor(url: string, token: string) {
    super();
    this.maxReconnectAttempts = options.maxReconnectAttempts || 5;
    this.reconnectDelay = options.reconnectDelay || 1000;
    this.url = url;
    this.token = token;
  }

  public async connect(): Promise<void> {
    if (this.ws && this.ws.readyState === WebSocket.OPEN) {
      return;
    }

    return new Promise((resolve, reject) => {
      try {
        this.ws = new WebSocket(this.url);

        this.ws.on("open", () => {
        this.ws.onopen = () => {
          this.emit('connect');
          this.authenticate();
        });

        this.ws.on("message", (data: string) => {
          const message = JSON.parse(data);
          this.handleMessage(message);
        });

        this.ws.on("close", () => {
          this.handleDisconnect();
        });

        this.ws.on("error", (error) => {
          this.emit("error", error);
          reject(error);
        });

        this.once("auth_ok", () => {
          this.authenticated = true;
          this.reconnectAttempts = 0;
          resolve();
        });
        };

        this.once("auth_invalid", () => {
          reject(new Error("Authentication failed"));
        });
        this.ws.onclose = () => {
          this.authenticated = false;
          this.emit('disconnect');
          this.handleReconnect();
        };

        this.ws.onerror = (event: WebSocket.ErrorEvent) => {
          const error = event.error || new Error(event.message || 'WebSocket error');
          this.emit('error', error);
          if (!this.authenticated) {
            reject(error);
          }
        };

        this.ws.onmessage = (event: WebSocket.MessageEvent) => {
          if (typeof event.data === 'string') {
            this.handleMessage(event.data);
          }
        };
      } catch (error) {
        reject(error);
      }
    });
  }

  private authenticate(): void {
    this.send({
      type: "auth",
      access_token: this.token,
    });
  public isConnected(): boolean {
    return this.ws !== null && this.ws.readyState === WebSocket.OPEN;
  }

  private handleMessage(message: any): void {
    switch (message.type) {
      case "auth_required":
        this.authenticate();
        break;
      case "auth_ok":
        this.emit("auth_ok");
        break;
      case "auth_invalid":
        this.emit("auth_invalid");
        break;
      case "event":
        this.handleEvent(message);
        break;
      case "result":
        this.emit(`result_${message.id}`, message);
        break;
    }
  }
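  // For context, the switch above follows the standard Home Assistant WebSocket
  // handshake (sketch only; the token value is a placeholder):
  //   server -> client : { "type": "auth_required" }
  //   client -> server : { "type": "auth", "access_token": "<HASS_TOKEN>" }  (sent by authenticate())
  //   server -> client : { "type": "auth_ok" }       -> emitted here as "auth_ok"
  //   server -> client : { "type": "auth_invalid" }  -> emitted here as "auth_invalid"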
  private handleEvent(message: any): void {
    const subscription = this.subscriptions.get(message.event.event_type);
    if (subscription) {
      subscription(message.event.data);
    }
    this.emit("event", message.event);
  }

  private handleDisconnect(): void {
    this.authenticated = false;
    this.emit("disconnected");

    if (
      this.options.autoReconnect &&
      this.reconnectAttempts < this.maxReconnectAttempts
    ) {
      setTimeout(
        () => {
          this.reconnectAttempts++;
          this.connect().catch((error) => {
            this.emit("error", error);
          });
        },
        this.reconnectDelay * Math.pow(2, this.reconnectAttempts),
      );
    }
  }

  public async subscribeEvents(
    eventType: string,
    callback: (data: any) => void,
  ): Promise<number> {
    if (!this.authenticated) {
      throw new Error("Not authenticated");
    }

    const id = this.messageId++;
    this.subscriptions.set(eventType, callback);

    return new Promise((resolve, reject) => {
      this.send({
        id,
        type: "subscribe_events",
        event_type: eventType,
      });

      this.once(`result_${id}`, (message) => {
        if (message.success) {
          resolve(id);
        } else {
          reject(new Error(message.error?.message || "Subscription failed"));
        }
      });
    });
  }

  public async unsubscribeEvents(subscription: number): Promise<void> {
    if (!this.authenticated) {
      throw new Error("Not authenticated");
    }

    const id = this.messageId++;
    return new Promise((resolve, reject) => {
      this.send({
        id,
        type: "unsubscribe_events",
        subscription,
      });

      this.once(`result_${id}`, (message) => {
        if (message.success) {
          resolve();
        } else {
          reject(new Error(message.error?.message || "Unsubscribe failed"));
        }
      });
    });
  }

  private send(message: any): void {
    if (this.ws?.readyState === WebSocket.OPEN) {
      this.ws.send(JSON.stringify(message));
    }
  public isAuthenticated(): boolean {
    return this.authenticated;
  }

  public disconnect(): void {
    if (this.ws) {
      this.ws.close();
      this.ws = null;
      this.authenticated = false;
    }
  }

  private authenticate(): void {
    const authMessage: HassAuthMessage = {
      type: "auth",
      access_token: this.token
    };
    this.send(authMessage);
  }

  private handleMessage(data: string): void {
    try {
      const message = JSON.parse(data) as HassMessage;

      switch (message.type) {
        case "auth_ok":
          this.authenticated = true;
          this.emit('authenticated', message);
          break;

        case "auth_invalid":
          this.authenticated = false;
          this.emit('auth_failed', message);
          this.disconnect();
          break;

        case "event":
          this.handleEvent(message as HassEventMessage);
          break;

        case "result": {
          const resultMessage = message as HassResultMessage;
          if (resultMessage.success) {
            this.emit('result', resultMessage);
          } else {
            this.emit('error', new Error(resultMessage.error || 'Unknown error'));
          }
          break;
        }

        default:
          this.emit('error', new Error(`Unknown message type: ${message.type}`));
      }
    } catch (error) {
      this.emit('error', error);
    }
  }

  private handleEvent(message: HassEventMessage): void {
    this.emit('event', message.event);
    const callback = this.subscriptions.get(message.id || 0);
    if (callback) {
      callback(message.event.data);
    }
  }

  public async subscribeEvents(eventType: string | undefined, callback: (data: any) => void): Promise<number> {
    if (!this.authenticated) {
      throw new Error('Not authenticated');
    }

    const id = this.messageId++;
    const message: HassSubscribeMessage = {
      id,
      type: "subscribe_events",
      event_type: eventType
    };

    return new Promise((resolve, reject) => {
      const handleResult = (result: HassResultMessage) => {
        if (result.id === id) {
          this.removeListener('result', handleResult);
          this.removeListener('error', handleError);

          if (result.success) {
            this.subscriptions.set(id, callback);
            resolve(id);
          } else {
            reject(new Error(result.error || 'Failed to subscribe'));
          }
        }
      };

      const handleError = (error: Error) => {
        this.removeListener('result', handleResult);
        this.removeListener('error', handleError);
        reject(error);
      };

      this.on('result', handleResult);
      this.on('error', handleError);

      this.send(message);
    });
  }

  public async unsubscribeEvents(subscription: number): Promise<boolean> {
    if (!this.authenticated) {
      throw new Error('Not authenticated');
    }

    const message: HassUnsubscribeMessage = {
      id: this.messageId++,
      type: "unsubscribe_events",
      subscription
    };

    return new Promise((resolve, reject) => {
      const handleResult = (result: HassResultMessage) => {
        if (result.id === message.id) {
          this.removeListener('result', handleResult);
          this.removeListener('error', handleError);

          if (result.success) {
            this.subscriptions.delete(subscription);
            resolve(true);
          } else {
            reject(new Error(result.error || 'Failed to unsubscribe'));
          }
        }
      };

      const handleError = (error: Error) => {
        this.removeListener('result', handleResult);
        this.removeListener('error', handleError);
        reject(error);
      };

      this.on('result', handleResult);
      this.on('error', handleError);

      this.send(message);
    });
  }

  private send(message: HassMessage): void {
    if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {
      throw new Error('WebSocket is not connected');
    }
    this.ws.send(JSON.stringify(message));
  }

  private handleReconnect(): void {
    if (this.reconnectAttempts < this.maxReconnectAttempts) {
      this.reconnectAttempts++;
      setTimeout(() => {
        this.connect().catch(() => { });
      }, 1000 * Math.pow(2, this.reconnectAttempts));
    }
  }
}
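Putting the client together, usage might look like the sketch below; the import path is an assumption (adjust to the repository layout), the URL and token are placeholders, and the two-argument constructor follows the newer signature in this diff:

import { HassWebSocketClient } from "./src/hass/websocket.js"; // assumed path

const client = new HassWebSocketClient(
  "ws://homeassistant.local:8123/api/websocket",
  process.env.HASS_TOKEN ?? "",
);

await client.connect();
const subId = await client.subscribeEvents("state_changed", (data) => {
  console.log("state change:", data);
});
// ...later
await client.unsubscribeEvents(subId);
client.disconnect();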
@@ -1,12 +1,12 @@
{
  "compilerOptions": {
    "target": "esnext",
    "module": "esnext",
    "target": "ESNext",
    "module": "ESNext",
    "lib": [
      "esnext",
      "dom"
    ],
    "strict": false,
    "strict": true,
    "strictNullChecks": false,
    "strictFunctionTypes": false,
    "strictPropertyInitialization": false,
@@ -15,7 +15,7 @@
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "moduleResolution": "bundler",
    "moduleResolution": "node",
    "allowImportingTsExtensions": true,
    "resolveJsonModule": true,
    "isolatedModules": true,
@@ -27,15 +27,16 @@
      "@types/ws",
      "@types/jsonwebtoken",
      "@types/sanitize-html",
      "@types/jest"
      "@types/jest",
      "@types/express"
    ],
    "baseUrl": ".",
    "paths": {
      "@/*": [
        "./src/*"
        "src/*"
      ],
      "@test/*": [
        "__tests__/*"
        "test/*"
      ]
    },
    "experimentalDecorators": true,
@@ -45,10 +46,12 @@
    "declarationMap": true,
    "allowUnreachableCode": true,
    "allowUnusedLabels": true,
    "suppressImplicitAnyIndexErrors": true
    "outDir": "dist",
    "rootDir": "."
  },
  "include": [
    "src/**/*",
    "test/**/*",
    "__tests__/**/*",
    "*.d.ts"
  ],
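With the updated baseUrl and paths above, the aliases resolve as sketched below; the @test import is illustrative only:

// "@/*" now maps to "src/*" and "@test/*" to "test/*", so:
import { APP_CONFIG } from "@/config/app.config.js"; // -> src/config/app.config.js
// import { setup } from "@test/setup";              // -> test/setup (hypothetical module)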