Compare commits

...

5 Commits

Author SHA1 Message Date
jango-blockchained
14a309d7d6 chore: Add Smithery AI badge to project README
- Update README.md with Smithery AI project badge
- Enhance project metadata and external recognition
2025-02-10 03:42:40 +01:00
jango-blockchained
8dbb2286dc feat: Enhance MCP tool execution and device listing with advanced filtering
- Refactor MCP execution endpoint to improve error handling and result reporting
- Update health check endpoint with MCP version and supported tools
- Extend list_devices tool with optional domain, area, and floor filtering
- Improve device listing response with more detailed device metadata
- Standardize tool import and initialization in main index file
2025-02-10 03:36:42 +01:00
jango-blockchained
b6bd53b01a feat: Enhance speech and AI configuration with advanced environment settings
- Update `.env.example` with comprehensive speech and AI configuration options
- Modify Docker Compose speech configuration for more flexible audio and ASR settings
- Enhance Dockerfile to support Python virtual environment and speech dependencies
- Refactor environment loading to use Bun's file system utilities
- Improve device listing tool with more detailed device statistics
- Add support for multiple AI models and dynamic configuration
2025-02-10 03:28:58 +01:00
jango-blockchained
986b1949cd Remove documentation from main branch (moved to gh-pages) 2025-02-08 17:26:20 +01:00
jango-blockchained
1e81e4db53 chore: Update configuration defaults and Docker port handling
- Modify Dockerfile to use dynamic port configuration
- Update Home Assistant host default to use local hostname
- Enhance JWT secret default length requirement
- Remove boilerplate and test setup configuration files
2025-02-07 22:30:49 +01:00
69 changed files with 847 additions and 9413 deletions

View File

@@ -3,6 +3,7 @@ NODE_ENV=development
PORT=3000
DEBUG=false
LOG_LEVEL=info
MCP_SERVER=http://localhost:3000
# Home Assistant Configuration
HASS_HOST=http://homeassistant.local:8123
@@ -40,18 +41,18 @@ MAX_REQUEST_SIZE=1048576
MAX_REQUEST_FIELDS=1000
# AI Configuration
PROCESSOR_TYPE=claude
PROCESSOR_TYPE=openai
OPENAI_API_KEY=your_openai_api_key
OPENAI_MODEL=gpt-3.5-turbo
MAX_RETRIES=3
ANALYSIS_TIMEOUT=30000
# Speech Features Configuration
ENABLE_SPEECH_FEATURES=false
ENABLE_WAKE_WORD=false
ENABLE_SPEECH_TO_TEXT=false
ENABLE_SPEECH_FEATURES=true
ENABLE_WAKE_WORD=true
ENABLE_SPEECH_TO_TEXT=true
WHISPER_MODEL_PATH=/models
WHISPER_MODEL_TYPE=tiny
WHISPER_MODEL_TYPE=base
# Audio Configuration
NOISE_THRESHOLD=0.05
@@ -62,6 +63,13 @@ CHANNELS=1
CHUNK_SIZE=1024
PULSE_SERVER=unix:/run/user/1000/pulse/native
# Whisper Configuration
ASR_MODEL=base
ASR_ENGINE=faster_whisper
WHISPER_BEAM_SIZE=5
COMPUTE_TYPE=float32
LANGUAGE=en
# SSE Configuration
SSE_MAX_CLIENTS=50
SSE_RECONNECT_TIMEOUT=5000
@@ -78,5 +86,11 @@ TEST_PORT=3001
# Version
VERSION=0.1.0
# Advanced (Docker)
COMPOSE_PROJECT_NAME=mcp
# Docker Configuration
COMPOSE_PROJECT_NAME=mcp
# Resource Limits
FAST_WHISPER_CPU_LIMIT=4.0
FAST_WHISPER_MEMORY_LIMIT=2G
MCP_CPU_LIMIT=1.0
MCP_MEMORY_LIMIT=512M

View File

@@ -11,10 +11,33 @@ RUN npm install -g bun@1.0.25
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
pulseaudio \
alsa-utils \
python3-full \
python3-pip \
python3-dev \
python3-venv \
portaudio19-dev \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean \
&& rm -rf /var/cache/apt/*
# Create and activate virtual environment
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
# Upgrade pip in virtual environment
RUN /opt/venv/bin/python -m pip install --upgrade pip
# Install Python packages in virtual environment
RUN /opt/venv/bin/python -m pip install --no-cache-dir \
numpy \
sounddevice \
openwakeword \
faster-whisper \
requests
# Set build-time environment variables
ENV NODE_ENV=production \
NODE_OPTIONS="--max-old-space-size=2048" \
@@ -38,23 +61,69 @@ FROM node:20-slim as runner
# Install bun in production image
RUN npm install -g bun@1.0.25
# Install runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
pulseaudio \
alsa-utils \
libasound2 \
libasound2-plugins \
python3-full \
python3-pip \
python3-dev \
python3-venv \
portaudio19-dev \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean \
&& rm -rf /var/cache/apt/*
# Configure ALSA
COPY docker/speech/asound.conf /etc/asound.conf
# Create and activate virtual environment
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
# Upgrade pip in virtual environment
RUN /opt/venv/bin/python -m pip install --upgrade pip
# Install Python packages in virtual environment
RUN /opt/venv/bin/python -m pip install --no-cache-dir \
numpy \
sounddevice \
openwakeword \
faster-whisper \
requests
# Set Python path to use virtual environment
ENV PYTHONPATH="/opt/venv/lib/python3.11/site-packages:$PYTHONPATH"
# Set production environment variables
ENV NODE_ENV=production \
NODE_OPTIONS="--max-old-space-size=1024"
# Create a non-root user
# Create a non-root user and add to audio group
RUN addgroup --system --gid 1001 nodejs && \
adduser --system --uid 1001 bunjs
adduser --system --uid 1001 --gid 1001 bunjs && \
adduser bunjs audio
WORKDIR /app
# Copy Python virtual environment from builder
COPY --from=builder --chown=bunjs:nodejs /opt/venv /opt/venv
# Copy source files
COPY --chown=bunjs:nodejs . .
# Copy only the necessary files from builder
COPY --from=builder --chown=bunjs:nodejs /app/dist ./dist
COPY --from=builder --chown=bunjs:nodejs /app/node_modules ./node_modules
COPY --chown=bunjs:nodejs package.json ./
# Create logs directory with proper permissions
RUN mkdir -p /app/logs && chown -R bunjs:nodejs /app/logs
# Ensure audio setup script is executable
RUN chmod +x /app/docker/speech/setup-audio.sh
# Create logs and audio directories with proper permissions
RUN mkdir -p /app/logs /app/audio && chown -R bunjs:nodejs /app/logs /app/audio
# Switch to non-root user
USER bunjs
@@ -64,7 +133,7 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:4000/health || exit 1
# Expose port
EXPOSE 4000
EXPOSE ${PORT:-4000}
# Start the application with optimized flags
CMD ["bun", "--smol", "run", "start"]
# Start the application with audio setup
CMD ["/bin/bash", "-c", "/app/docker/speech/setup-audio.sh & bun --smol run start"]

View File

@@ -1,6 +1,6 @@
# MCP Server for Home Assistant 🏠🤖
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) [![Bun](https://img.shields.io/badge/bun-%3E%3D1.0.26-black)](https://bun.sh) [![TypeScript](https://img.shields.io/badge/typescript-%5E5.0.0-blue.svg)](https://www.typescriptlang.org)
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) [![Bun](https://img.shields.io/badge/bun-%3E%3D1.0.26-black)](https://bun.sh) [![TypeScript](https://img.shields.io/badge/typescript-%5E5.0.0-blue.svg)](https://www.typescriptlang.org) [![smithery badge](https://smithery.ai/badge/@jango-blockchained/advanced-homeassistant-mcp)](https://smithery.ai/server/@jango-blockchained/advanced-homeassistant-mcp)
## Overview 🌐

View File

@@ -4,17 +4,27 @@ services:
homeassistant-mcp:
image: homeassistant-mcp:latest
environment:
# Speech Feature Flags
- ENABLE_SPEECH_FEATURES=${ENABLE_SPEECH_FEATURES:-true}
- ENABLE_WAKE_WORD=${ENABLE_WAKE_WORD:-true}
- ENABLE_SPEECH_TO_TEXT=${ENABLE_SPEECH_TO_TEXT:-true}
# Audio Configuration
- NOISE_THRESHOLD=${NOISE_THRESHOLD:-0.05}
- MIN_SPEECH_DURATION=${MIN_SPEECH_DURATION:-1.0}
- SILENCE_DURATION=${SILENCE_DURATION:-0.5}
- SAMPLE_RATE=${SAMPLE_RATE:-16000}
- CHANNELS=${CHANNELS:-1}
- CHUNK_SIZE=${CHUNK_SIZE:-1024}
- PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
fast-whisper:
image: onerahmet/openai-whisper-asr-webservice:latest
volumes:
- whisper-models:/models
- audio-data:/audio
environment:
- ASR_MODEL=base
- ASR_MODEL=${WHISPER_MODEL_TYPE:-base}
- ASR_ENGINE=faster_whisper
- WHISPER_BEAM_SIZE=5
- COMPUTE_TYPE=float32
@@ -27,7 +37,7 @@ services:
cpus: '4.0'
memory: 2G
healthcheck:
test: [ "CMD", "curl", "-f", "http://localhost:9000/asr/health" ]
test: [ "CMD", "curl", "-f", "http://localhost:9000/health" ]
interval: 30s
timeout: 10s
retries: 3
@@ -40,10 +50,23 @@ services:
volumes:
- /run/user/1000/pulse/native:/run/user/1000/pulse/native
environment:
- PULSE_SERVER=unix:/run/user/1000/pulse/native
- PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
- PULSE_COOKIE=/run/user/1000/pulse/cookie
- PYTHONUNBUFFERED=1
- OPENWAKEWORD_MODEL=hey_jarvis
- OPENWAKEWORD_THRESHOLD=0.5
- MICROPHONE_COMMAND=arecord -D hw:0,0 -f S16_LE -c 1 -r 16000 -t raw
group_add:
- audio
- "${AUDIO_GID:-29}"
network_mode: host
privileged: true
entrypoint: >
/bin/bash -c " apt-get update && apt-get install -y pulseaudio alsa-utils && rm -rf /var/lib/apt/lists/* && /run.sh"
healthcheck:
test: [ "CMD-SHELL", "pactl info > /dev/null 2>&1 || exit 1" ]
interval: 30s
timeout: 10s
retries: 3
volumes:
whisper-models:

docker/speech/asound.conf (new file, 35 lines)
View File

@@ -0,0 +1,35 @@
pcm.!default {
type pulse
fallback "sysdefault"
hint {
show on
description "Default ALSA Output (currently PulseAudio Sound Server)"
}
}
ctl.!default {
type pulse
fallback "sysdefault"
}
# Use PulseAudio by default
pcm.pulse {
type pulse
}
ctl.pulse {
type pulse
}
# Explicit device for recording
pcm.microphone {
type hw
card 0
device 0
}
# Default capture device
pcm.!default {
type pulse
hint.description "Default Audio Device"
}

View File

@@ -30,6 +30,9 @@ MAX_MODEL_LOAD_RETRIES = 3
MODEL_LOAD_RETRY_DELAY = 5 # seconds
MODEL_DOWNLOAD_TIMEOUT = 600 # 10 minutes timeout for model download
# ALSA device configuration
AUDIO_DEVICE = 'hw:0,0' # Use ALSA hardware device directly
# Audio processing parameters
NOISE_THRESHOLD = 0.08 # Increased threshold for better noise filtering
MIN_SPEECH_DURATION = 2.0 # Longer minimum duration to avoid fragments
@@ -44,7 +47,7 @@ WAKE_WORD_ENABLED = os.environ.get('ENABLE_WAKE_WORD', 'false').lower() == 'true'
SPEECH_ENABLED = os.environ.get('ENABLE_SPEECH_FEATURES', 'true').lower() == 'true'
# Wake word models to use (only if wake word is enabled)
WAKE_WORDS = ["alexa"] # Using 'alexa' as temporary replacement for 'gaja'
WAKE_WORDS = ["hey_jarvis"] # Using hey_jarvis as it's more similar to "hey gaja"
WAKE_WORD_ALIAS = "gaja" # What we print when wake word is detected
# Home Assistant Configuration
@@ -235,7 +238,22 @@ class AudioProcessor:
self.buffer = np.zeros(SAMPLE_RATE * BUFFER_DURATION)
self.buffer_lock = threading.Lock()
self.last_transcription_time = 0
self.stream = None
try:
logger.info(f"Opening audio device: {AUDIO_DEVICE}")
self.stream = sd.InputStream(
device=AUDIO_DEVICE,
samplerate=SAMPLE_RATE,
channels=CHANNELS,
dtype=np.int16,
blocksize=CHUNK_SIZE,
callback=self._audio_callback
)
logger.info("Audio stream initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize audio stream: {e}")
raise
self.speech_detected = False
self.silence_frames = 0
self.speech_frames = 0
@@ -272,7 +290,7 @@ class AudioProcessor:
return True
return False
def audio_callback(self, indata, frames, time, status):
def _audio_callback(self, indata, frames, time, status):
"""Callback for audio input"""
if status:
logger.warning(f"Audio callback status: {status}")
@@ -382,7 +400,7 @@ class AudioProcessor:
channels=CHANNELS,
samplerate=SAMPLE_RATE,
blocksize=CHUNK_SIZE,
callback=self.audio_callback
callback=self._audio_callback
):
logger.info("Audio input stream started successfully")
logger.info("Listening for audio input...")

View File

@@ -1,23 +0,0 @@
source "https://rubygems.org"
gem "github-pages", group: :jekyll_plugins
gem "jekyll-theme-minimal"
gem "jekyll-relative-links"
gem "jekyll-seo-tag"
gem "jekyll-remote-theme"
gem "jekyll-github-metadata"
gem "faraday-retry"
# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", ">= 1"
gem "tzinfo-data"
end
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
# Add webrick for Ruby 3.0+
gem "webrick", "~> 1.7"

View File

@@ -1,78 +0,0 @@
title: Model Context Protocol (MCP)
description: A bridge between Home Assistant and Language Learning Models
theme: jekyll-theme-minimal
markdown: kramdown
# Repository settings
repository: jango-blockchained/advanced-homeassistant-mcp
github: [metadata]
# Add base URL and URL settings
baseurl: "/advanced-homeassistant-mcp" # the subpath of your site
url: "https://jango-blockchained.github.io" # the base hostname & protocol
# Theme settings
logo: /assets/img/logo.png # path to logo (create this if you want a logo)
show_downloads: true # show download buttons for your repo
plugins:
- jekyll-relative-links
- jekyll-seo-tag
- jekyll-remote-theme
- jekyll-github-metadata
# Enable relative links
relative_links:
enabled: true
collections: true
# Navigation structure
header_pages:
- index.md
- getting-started.md
- api.md
- usage.md
- tools/tools.md
- development/development.md
- troubleshooting.md
- contributing.md
- roadmap.md
# Collections
collections:
tools:
output: true
permalink: /:collection/:name
development:
output: true
permalink: /:collection/:name
# Default layouts
defaults:
- scope:
path: ""
type: "pages"
values:
layout: "default"
- scope:
path: "tools"
type: "tools"
values:
layout: "default"
- scope:
path: "development"
type: "development"
values:
layout: "default"
# Exclude files from processing
exclude:
- Gemfile
- Gemfile.lock
- node_modules
- vendor
# Sass settings
sass:
style: compressed
sass_dir: _sass

View File

@@ -1,52 +0,0 @@
<!DOCTYPE html>
<html lang="{{ site.lang | default: " en-US" }}">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
{% seo %}
<link rel="stylesheet" href="{{ " /assets/css/style.css?v=" | append: site.github.build_revision | relative_url }}">
</head>
<body>
<div class="wrapper">
<header>
<h1><a href="{{ " /" | absolute_url }}">{{ site.title | default: site.github.repository_name }}</a></h1>
{% if site.logo %}
<img src="{{site.logo | relative_url}}" alt="Logo" />
{% endif %}
<p>{{ site.description | default: site.github.project_tagline }}</p>
<p class="view"><a href="{{ site.github.repository_url }}">View the Project on GitHub <small>{{
site.github.repository_nwo }}</small></a></p>
<nav class="main-nav">
<h3>Documentation</h3>
<ul>
<li><a href="{{ '/getting-started' | relative_url }}">Getting Started</a></li>
<li><a href="{{ '/api' | relative_url }}">API Reference</a></li>
<li><a href="{{ '/sse-api' | relative_url }}">SSE API</a></li>
<li><a href="{{ '/architecture' | relative_url }}">Architecture</a></li>
<li><a href="{{ '/contributing' | relative_url }}">Contributing</a></li>
<li><a href="{{ '/troubleshooting' | relative_url }}">Troubleshooting</a></li>
</ul>
</nav>
</header>
<section>
{{ content }}
</section>
<footer>
{% if site.github.is_project_page %}
<p>This project is maintained by <a href="{{ site.github.owner_url }}">{{ site.github.owner_name }}</a></p>
{% endif %}
<p><small>Hosted on GitHub Pages &mdash; Theme by <a
href="https://github.com/orderedlist">orderedlist</a></small></p>
</footer>
</div>
<script src="{{ " /assets/js/scale.fix.js" | relative_url }}"></script>
</body>
</html>

View File

@@ -1,170 +0,0 @@
# Home Assistant MCP Server API Documentation
## Overview
This document provides a reference for the MCP Server API, which offers basic device control and state management for Home Assistant.
## Authentication
All API requests require a valid JWT token in the Authorization header:
```http
Authorization: Bearer YOUR_TOKEN
```
## Core Endpoints
### Device State Management
#### Get Device State
```http
GET /api/state/{entity_id}
```
**Response:**
```json
{
"entity_id": "light.living_room",
"state": "on",
"attributes": {
"brightness": 128
}
}
```
#### Update Device State
```http
POST /api/state
Content-Type: application/json
{
"entity_id": "light.living_room",
"state": "on",
"attributes": {
"brightness": 128
}
}
```
### Device Control
#### Execute Device Command
```http
POST /api/control
Content-Type: application/json
{
"entity_id": "light.living_room",
"command": "turn_on",
"parameters": {
"brightness": 50
}
}
```
## Real-Time Updates
### WebSocket Connection
Connect to real-time updates:
```javascript
const ws = new WebSocket('ws://localhost:3000/events');
ws.onmessage = (event) => {
const deviceUpdate = JSON.parse(event.data);
console.log('Device state changed:', deviceUpdate);
};
```
## Error Handling
### Common Error Responses
```json
{
"error": {
"code": "INVALID_REQUEST",
"message": "Invalid request parameters",
"details": "Entity ID not found or invalid command"
}
}
```
## Rate Limiting
Basic rate limiting is implemented:
- Maximum of 100 requests per minute
- Excess requests will receive a 429 Too Many Requests response
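A client can treat the 429 response as a signal to back off and retry. The sketch below is illustrative only; the fixed fallback delay and the `Retry-After` header are assumptions rather than documented behaviour of this server:
```typescript
// Hypothetical client-side handling of the 429 response described above.
async function fetchWithRateLimitRetry(
    url: string,
    init: RequestInit,
    maxRetries = 3
): Promise<Response> {
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
        const response = await fetch(url, init);
        if (response.status !== 429) {
            return response;
        }
        // Fall back to a fixed 5 s delay when no Retry-After header is present (assumption).
        const retryAfterSeconds = Number(response.headers.get('Retry-After')) || 5;
        await new Promise((resolve) => setTimeout(resolve, retryAfterSeconds * 1000));
    }
    throw new Error('Rate limit retries exhausted');
}
```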
## Supported Operations
### Supported Commands
- `turn_on`
- `turn_off`
- `toggle`
- `set_brightness`
- `set_color`
### Supported Entities
- Lights
- Switches
- Climate controls
- Media players
## Limitations
- Limited to basic device control
- No advanced automation
- Minimal error handling
- Basic authentication
## Best Practices
1. Always include a valid JWT token
2. Handle potential errors in your client code
3. Use WebSocket for real-time updates when possible
4. Validate entity IDs before sending commands
## Example Client Usage
```typescript
async function controlDevice(entityId: string, command: string, params?: Record<string, unknown>) {
try {
const response = await fetch('/api/control', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${token}`
},
body: JSON.stringify({
entity_id: entityId,
command,
parameters: params
})
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.message);
}
return await response.json();
} catch (error) {
console.error('Device control failed:', error);
throw error;
}
}
// Usage example
controlDevice('light.living_room', 'turn_on', { brightness: 50 })
.then(result => console.log('Device controlled successfully'))
.catch(error => console.error('Control failed', error));
```
## Future Development
Planned improvements:
- Enhanced error handling
- More comprehensive device support
- Improved authentication mechanisms
*API is subject to change. Always refer to the latest documentation.*

View File

@@ -1,326 +0,0 @@
---
layout: default
title: Core Functions
parent: API Reference
nav_order: 3
---
# Core Functions API 🔧
The Core Functions API provides the fundamental operations for interacting with Home Assistant devices and services through MCP Server.
## Device Control
### Get Device State
Retrieve the current state of devices.
```http
GET /api/state
GET /api/state/{entity_id}
```
Parameters:
- `entity_id` (optional): Specific device ID to query
```bash
# Get all states
curl http://localhost:3000/api/state
# Get specific device state
curl http://localhost:3000/api/state/light.living_room
```
Response:
```json
{
"entity_id": "light.living_room",
"state": "on",
"attributes": {
"brightness": 255,
"color_temp": 370,
"friendly_name": "Living Room Light"
},
"last_changed": "2024-01-20T15:30:00Z"
}
```
### Control Device
Execute device commands.
```http
POST /api/device/control
```
Request body:
```json
{
"entity_id": "light.living_room",
"action": "turn_on",
"parameters": {
"brightness": 200,
"color_temp": 400
}
}
```
Available actions:
- `turn_on`
- `turn_off`
- `toggle`
- `set_value`
Example with curl:
```bash
curl -X POST http://localhost:3000/api/device/control \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
-d '{
"entity_id": "light.living_room",
"action": "turn_on",
"parameters": {
"brightness": 200
}
}'
```
## Natural Language Commands
### Execute Command
Process natural language commands.
```http
POST /api/command
```
Request body:
```json
{
"command": "Turn on the living room lights and set them to 50% brightness"
}
```
Example usage:
```bash
curl -X POST http://localhost:3000/api/command \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
-d '{
"command": "Turn on the living room lights and set them to 50% brightness"
}'
```
Response:
```json
{
"success": true,
"actions": [
{
"entity_id": "light.living_room",
"action": "turn_on",
"parameters": {
"brightness": 127
},
"status": "completed"
}
],
"message": "Command executed successfully"
}
```
## Scene Management
### Create Scene
Define a new scene with multiple actions.
```http
POST /api/scene
```
Request body:
```json
{
"name": "Movie Night",
"description": "Perfect lighting for movie watching",
"actions": [
{
"entity_id": "light.living_room",
"action": "turn_on",
"parameters": {
"brightness": 50,
"color_temp": 500
}
},
{
"entity_id": "cover.living_room",
"action": "close"
}
]
}
```
### Activate Scene
Trigger a predefined scene.
```http
POST /api/scene/{scene_name}/activate
```
Example:
```bash
curl -X POST http://localhost:3000/api/scene/movie_night/activate \
-H "Authorization: Bearer YOUR_JWT_TOKEN"
```
## Groups
### Create Device Group
Create a group of devices for collective control.
```http
POST /api/group
```
Request body:
```json
{
"name": "Living Room",
"entities": [
"light.living_room_main",
"light.living_room_accent",
"switch.living_room_fan"
]
}
```
### Control Group
Control multiple devices in a group.
```http
POST /api/group/{group_name}/control
```
Request body:
```json
{
"action": "turn_off"
}
```
## System Operations
### Health Check
Check server status and connectivity.
```http
GET /health
```
Response:
```json
{
"status": "healthy",
"version": "1.0.0",
"uptime": 3600,
"homeAssistant": {
"connected": true,
"version": "2024.1.0"
}
}
```
### Configuration
Get current server configuration.
```http
GET /api/config
```
Response:
```json
{
"server": {
"port": 3000,
"host": "0.0.0.0",
"version": "1.0.0"
},
"homeAssistant": {
"url": "http://homeassistant:8123",
"connected": true
},
"features": {
"nlp": true,
"scenes": true,
"groups": true
}
}
```
## Error Handling
All endpoints follow standard HTTP status codes and return detailed error messages:
```json
{
"error": true,
"code": "INVALID_ENTITY",
"message": "Device 'light.nonexistent' not found",
"details": {
"entity_id": "light.nonexistent",
"available_entities": [
"light.living_room",
"light.kitchen"
]
}
}
```
Common error codes:
- `INVALID_ENTITY`: Device not found
- `INVALID_ACTION`: Unsupported action
- `INVALID_PARAMETERS`: Invalid command parameters
- `AUTHENTICATION_ERROR`: Invalid or missing token
- `CONNECTION_ERROR`: Home Assistant connection issue
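Client code can branch on these codes to decide whether to retry, re-authenticate, or surface the problem. A minimal, hypothetical dispatcher follows; the `ApiError` shape mirrors the example payload above, and the actual response type may differ:
```typescript
interface ApiError {
    error: boolean;
    code: string;
    message: string;
    details?: Record<string, unknown>;
}

function handleApiError(err: ApiError): void {
    switch (err.code) {
        case 'INVALID_ENTITY':
        case 'INVALID_ACTION':
        case 'INVALID_PARAMETERS':
            console.warn(`Request problem: ${err.message}`); // fix the request, do not retry as-is
            break;
        case 'AUTHENTICATION_ERROR':
            console.warn('Token invalid or missing - refresh credentials and retry');
            break;
        case 'CONNECTION_ERROR':
            console.warn('Home Assistant unreachable - retry with backoff');
            break;
        default:
            console.error(`Unhandled error ${err.code}: ${err.message}`);
    }
}
```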
## TypeScript Interfaces
```typescript
interface DeviceState {
entity_id: string;
state: string;
attributes: Record<string, any>;
last_changed: string;
}
interface DeviceCommand {
entity_id: string;
action: 'turn_on' | 'turn_off' | 'toggle' | 'set_value';
parameters?: Record<string, any>;
}
interface Scene {
name: string;
description?: string;
actions: DeviceCommand[];
}
interface Group {
name: string;
entities: string[];
}
```
## Related Resources
- [API Overview](index.md)
- [SSE API](sse.md)
- [Architecture](../architecture.md)
- [Examples](https://github.com/jango-blockchained/advanced-homeassistant-mcp/tree/main/examples)

View File

@@ -1,242 +0,0 @@
---
layout: default
title: API Overview
parent: API Reference
nav_order: 1
has_children: false
---
# API Documentation 📚
Welcome to the MCP Server API documentation. This guide covers all available endpoints, authentication methods, and integration patterns.
## API Overview
The MCP Server provides several API categories:
1. **Core API** - Basic device control and state management
2. **SSE API** - Real-time event subscriptions
3. **Scene API** - Scene management and automation
4. **Voice API** - Natural language command processing
## Authentication
All API endpoints require authentication using JWT tokens:
```bash
# Include the token in your requests
curl -H "Authorization: Bearer YOUR_JWT_TOKEN" http://localhost:3000/api/state
```
To obtain a token:
```bash
curl -X POST http://localhost:3000/auth/token \
-H "Content-Type: application/json" \
-d '{"username": "your_username", "password": "your_password"}'
```
## Core Endpoints
### Device State
```http
GET /api/state
```
Retrieve the current state of all devices:
```bash
curl http://localhost:3000/api/state
```
Response:
```json
{
"devices": [
{
"id": "light.living_room",
"state": "on",
"attributes": {
"brightness": 255,
"color_temp": 370
}
}
]
}
```
### Command Execution
```http
POST /api/command
```
Execute a natural language command:
```bash
curl -X POST http://localhost:3000/api/command \
-H "Content-Type: application/json" \
-d '{"command": "Turn on the kitchen lights"}'
```
Response:
```json
{
"success": true,
"action": "turn_on",
"device": "light.kitchen",
"message": "Kitchen lights turned on"
}
```
## Real-Time Events
### Event Subscription
```http
GET /subscribe_events
```
Subscribe to device state changes:
```javascript
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_TOKEN');
eventSource.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('State changed:', data);
};
```
### Filtered Subscriptions
Subscribe to specific device types:
```http
GET /subscribe_events?domain=light
GET /subscribe_events?entity_id=light.living_room
```
## Scene Management
### Create Scene
```http
POST /api/scene
```
Create a new scene:
```bash
curl -X POST http://localhost:3000/api/scene \
-H "Content-Type: application/json" \
-d '{
"name": "Movie Night",
"actions": [
{"device": "light.living_room", "action": "dim", "value": 20},
{"device": "media_player.tv", "action": "on"}
]
}'
```
### Activate Scene
```http
POST /api/scene/activate
```
Activate an existing scene:
```bash
curl -X POST http://localhost:3000/api/scene/activate \
-H "Content-Type: application/json" \
-d '{"name": "Movie Night"}'
```
## Error Handling
The API uses standard HTTP status codes:
- `200` - Success
- `400` - Bad Request
- `401` - Unauthorized
- `404` - Not Found
- `500` - Server Error
Error responses include detailed messages:
```json
{
"error": true,
"message": "Device not found",
"code": "DEVICE_NOT_FOUND",
"details": {
"device_id": "light.nonexistent"
}
}
```
## Rate Limiting
API requests are rate-limited to prevent abuse:
```http
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 99
X-RateLimit-Reset: 1640995200
```
When exceeded, returns `429 Too Many Requests`:
```json
{
"error": true,
"message": "Rate limit exceeded",
"reset": 1640995200
}
```
## WebSocket API
For bi-directional communication:
```javascript
const ws = new WebSocket('ws://localhost:3000/ws');
ws.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('Received:', data);
};
ws.send(JSON.stringify({
type: 'command',
payload: {
command: 'Turn on lights'
}
}));
```
## API Versioning
The current API version is v1. Include the version in the URL:
```http
/api/v1/state
/api/v1/command
```
## Further Reading
- [SSE API Details](sse.md) - In-depth SSE documentation
- [Core Functions](core.md) - Detailed endpoint documentation
- [Architecture Overview](../architecture.md) - System design details
- [Troubleshooting](../troubleshooting.md) - Common issues and solutions
# API Reference
The Advanced Home Assistant MCP provides several APIs for integration and automation:
- [Core API](core.md) - Primary interface for system control
- [SSE API](sse.md) - Server-Sent Events for real-time updates
- [Core Functions](core.md) - Essential system functions

View File

@@ -1,266 +0,0 @@
---
layout: default
title: SSE API
parent: API Reference
nav_order: 2
---
# Server-Sent Events (SSE) API 📡
The SSE API provides real-time updates about device states and events from your Home Assistant setup. This guide covers how to use and implement SSE connections in your applications.
## Overview
Server-Sent Events (SSE) is a standard that enables servers to push real-time updates to clients over HTTP connections. MCP Server uses SSE to provide:
- Real-time device state updates
- Event notifications
- System status changes
- Command execution results
## Basic Usage
### Establishing a Connection
Create an EventSource connection to receive updates:
```javascript
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_JWT_TOKEN');
eventSource.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('Received update:', data);
};
```
### Connection States
Handle different connection states:
```javascript
eventSource.onopen = () => {
console.log('Connection established');
};
eventSource.onerror = (error) => {
console.error('Connection error:', error);
// Implement reconnection logic if needed
};
```
## Event Types
### Device State Events
Subscribe to all device state changes:
```javascript
const stateEvents = new EventSource('http://localhost:3000/subscribe_events?type=state');
stateEvents.onmessage = (event) => {
const state = JSON.parse(event.data);
console.log('Device state changed:', state);
};
```
Example state event:
```json
{
"type": "state_changed",
"entity_id": "light.living_room",
"state": "on",
"attributes": {
"brightness": 255,
"color_temp": 370
},
"timestamp": "2024-01-20T15:30:00Z"
}
```
### Filtered Subscriptions
#### By Domain
Subscribe to specific device types:
```javascript
// Subscribe to only light events
const lightEvents = new EventSource('http://localhost:3000/subscribe_events?domain=light');
// Subscribe to multiple domains
const multiEvents = new EventSource('http://localhost:3000/subscribe_events?domain=light,switch,sensor');
```
#### By Entity ID
Subscribe to specific devices:
```javascript
// Single entity
const livingRoomLight = new EventSource(
'http://localhost:3000/subscribe_events?entity_id=light.living_room'
);
// Multiple entities
const kitchenDevices = new EventSource(
'http://localhost:3000/subscribe_events?entity_id=light.kitchen,switch.coffee_maker'
);
```
## Advanced Usage
### Connection Management
Implement robust connection handling:
```javascript
class SSEManager {
constructor(url, options = {}) {
this.url = url;
this.options = {
maxRetries: 3,
retryDelay: 1000,
...options
};
this.retryCount = 0;
this.connect();
}
connect() {
this.eventSource = new EventSource(this.url);
this.eventSource.onopen = () => {
this.retryCount = 0;
console.log('Connected to SSE stream');
};
this.eventSource.onerror = (error) => {
this.handleError(error);
};
this.eventSource.onmessage = (event) => {
this.handleMessage(event);
};
}
handleError(error) {
console.error('SSE Error:', error);
this.eventSource.close();
if (this.retryCount < this.options.maxRetries) {
this.retryCount++;
setTimeout(() => {
console.log(`Retrying connection (${this.retryCount}/${this.options.maxRetries})`);
this.connect();
}, this.options.retryDelay * this.retryCount);
}
}
handleMessage(event) {
try {
const data = JSON.parse(event.data);
// Handle the event data
console.log('Received:', data);
} catch (error) {
console.error('Error parsing SSE data:', error);
}
}
disconnect() {
if (this.eventSource) {
this.eventSource.close();
}
}
}
// Usage
const sseManager = new SSEManager('http://localhost:3000/subscribe_events?token=YOUR_TOKEN');
```
### Event Filtering
Filter events on the client side:
```javascript
class EventFilter {
constructor(conditions) {
this.conditions = conditions;
}
matches(event) {
return Object.entries(this.conditions).every(([key, value]) => {
if (Array.isArray(value)) {
return value.includes(event[key]);
}
return event[key] === value;
});
}
}
// Usage
const filter = new EventFilter({
domain: ['light', 'switch'],
state: 'on'
});
eventSource.onmessage = (event) => {
const data = JSON.parse(event.data);
if (filter.matches(data)) {
console.log('Matched event:', data);
}
};
```
## Best Practices
1. **Authentication**
- Always include authentication tokens
- Implement token refresh mechanisms
- Handle authentication errors gracefully
2. **Error Handling**
- Implement progressive retry logic
- Log connection issues
- Notify users of connection status
3. **Resource Management**
- Close EventSource connections when not needed
- Limit the number of concurrent connections
- Use filtered subscriptions when possible
4. **Performance**
- Process events efficiently
- Batch UI updates
- Consider debouncing frequent updates
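For the debouncing point above, a small helper can collapse a burst of state events into a single handler call. This is a generic sketch, not part of the MCP API:
```typescript
// Generic debounce helper: run fn only after waitMs of quiet time.
function debounce<T extends unknown[]>(
    fn: (...args: T) => void,
    waitMs: number
): (...args: T) => void {
    let timer: ReturnType<typeof setTimeout> | undefined;
    return (...args: T) => {
        if (timer !== undefined) clearTimeout(timer);
        timer = setTimeout(() => fn(...args), waitMs);
    };
}

// Usage: re-render at most once per 250 ms burst of SSE messages.
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_TOKEN');

const renderState = debounce((state: unknown) => {
    console.log('Render with latest state:', state);
}, 250);

eventSource.onmessage = (event) => {
    renderState(JSON.parse(event.data));
};
```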
## Common Issues
### Connection Drops
If the connection drops, the EventSource will automatically attempt to reconnect. You can customize this behavior:
```javascript
eventSource.addEventListener('error', (error) => {
if (eventSource.readyState === EventSource.CLOSED) {
// Connection closed, implement custom retry logic
}
});
```
### Memory Leaks
Always clean up EventSource connections:
```javascript
// In a React component
useEffect(() => {
const eventSource = new EventSource('http://localhost:3000/subscribe_events');
return () => {
eventSource.close(); // Cleanup on unmount
};
}, []);
```
## Related Resources
- [API Overview](index.md)
- [Core Functions](core.md)
- [WebSocket API](index.md#websocket-api)
- [Troubleshooting](../troubleshooting.md)

View File

@@ -1,88 +0,0 @@
---
layout: default
title: Architecture
nav_order: 4
---
# Architecture Overview 🏗️
This document describes the architecture of the MCP Server, explaining how different components work together to provide a bridge between Home Assistant and custom automation tools.
## System Architecture
```mermaid
graph TD
subgraph "Client Layer"
WC[Web Clients]
MC[Mobile Clients]
end
subgraph "MCP Server"
API[API Gateway]
SSE[SSE Manager]
WS[WebSocket Server]
CM[Command Manager]
end
subgraph "Home Assistant"
HA[Home Assistant Core]
Dev[Devices & Services]
end
WC --> |HTTP/WS| API
MC --> |HTTP/WS| API
API --> |Events| SSE
API --> |Real-time| WS
API --> HA
HA --> API
```
## Core Components
### API Gateway
- Handles incoming HTTP and WebSocket requests
- Provides endpoints for device management
- Implements basic authentication and request validation
### SSE Manager
- Manages Server-Sent Events for real-time updates
- Broadcasts device state changes to connected clients
### WebSocket Server
- Provides real-time, bidirectional communication
- Supports basic device control and state monitoring
### Command Manager
- Processes device control requests
- Translates API commands to Home Assistant compatible formats
## Communication Flow
1. Client sends a request to the MCP Server API
2. API Gateway authenticates the request
3. Command Manager processes the request
4. Request is forwarded to Home Assistant
5. Response is sent back to the client via API or WebSocket
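The same flow can be pictured as one simplified handler. The sketch below is illustrative: the helper names are assumptions, although the forwarded call uses Home Assistant's standard `/api/services/<domain>/<service>` REST endpoint:
```typescript
// Placeholder for the gateway's token check (step 2); the real logic is not shown here.
async function authenticate(token: string): Promise<void> {
    if (!token) throw new Error('Missing token');
}

// Steps 1-5 from the flow above, collapsed into a single illustrative handler.
async function handleControlRequest(
    token: string,
    cmd: { entity_id: string; action: string; parameters?: Record<string, unknown> }
) {
    await authenticate(token);                  // 2. authenticate the request
    const [domain] = cmd.entity_id.split('.');  // 3. translate to a HA service call
    const response = await fetch(               // 4. forward to Home Assistant
        `${process.env.HASS_HOST}/api/services/${domain}/${cmd.action}`,
        {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${process.env.HASS_TOKEN}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ entity_id: cmd.entity_id, ...cmd.parameters })
        }
    );
    return response.json();                     // 5. response goes back to the client
}
```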
## Key Design Principles
- **Simplicity:** Lightweight, focused design
- **Flexibility:** Easily extendable architecture
- **Performance:** Efficient request handling
- **Security:** Basic authentication and validation
## Limitations
- Basic device control capabilities
- Limited advanced automation features
- Minimal third-party integrations
## Future Improvements
- Enhanced error handling
- More robust authentication
- Expanded device type support
*Architecture is subject to change as the project evolves.*

View File

@@ -1,54 +0,0 @@
@import "{{ site.theme }}";
// Custom styles
.main-nav {
margin-top: 20px;
ul {
list-style: none;
padding: 0;
margin: 0;
}
li {
margin-bottom: 8px;
}
a {
color: #267CB9;
text-decoration: none;
&:hover {
text-decoration: underline;
}
}
}
h1,
h2,
h3 {
color: #333;
}
code {
background-color: #f8f8f8;
border: 1px solid #ddd;
border-radius: 3px;
padding: 2px 5px;
}
pre {
background-color: #f8f8f8;
border: 1px solid #ddd;
border-radius: 3px;
padding: 10px;
overflow-x: auto;
}
.wrapper {
max-width: 960px;
}
section {
max-width: 700px;
}

View File

@@ -1,28 +0,0 @@
:root {
--md-primary-fg-color: #1a73e8;
--md-primary-fg-color--light: #5195ee;
--md-primary-fg-color--dark: #0d47a1;
}
.md-header {
box-shadow: 0 0 0.2rem rgba(0,0,0,.1), 0 0.2rem 0.4rem rgba(0,0,0,.2);
}
.md-main__inner {
margin-top: 1.5rem;
}
.md-typeset h1 {
font-weight: 700;
color: var(--md-primary-fg-color);
}
.md-typeset .admonition {
font-size: .8rem;
}
code {
background-color: rgba(175,184,193,0.2);
padding: .2em .4em;
border-radius: 6px;
}

View File

@@ -1,16 +0,0 @@
{
"mcpServers": {
"homeassistant-mcp": {
"command": "bun",
"args": [
"run",
"start",
"--port",
"8080"
],
"env": {
"NODE_ENV": "production"
}
}
}
}

View File

@@ -1,18 +0,0 @@
{
"mcpServers": {
"homeassistant-mcp": {
"command": "bun",
"args": [
"run",
"start",
"--enable-cline",
"--config",
"${configDir}/.env"
],
"env": {
"NODE_ENV": "production",
"CLINE_MODE": "true"
}
}
}
}

View File

@@ -1,30 +0,0 @@
# Configuration
This section covers the configuration options available in the Home Assistant MCP Server.
## Overview
The MCP Server can be configured through various configuration files and environment variables. This section will guide you through the available options and their usage.
## Configuration Files
The main configuration files are:
1. `.env` - Environment variables
2. `config.yaml` - Main configuration file
3. `devices.yaml` - Device-specific configurations
## Environment Variables
Key environment variables that can be set:
- `MCP_HOST` - Host address (default: 0.0.0.0)
- `MCP_PORT` - Port number (default: 8123)
- `MCP_LOG_LEVEL` - Logging level (default: INFO)
- `MCP_CONFIG_DIR` - Configuration directory path
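For illustration, these variables might be read with their documented defaults like this (the fallback for `MCP_CONFIG_DIR` is an assumption, since no default is listed):
```typescript
// Illustrative only; variable names and defaults are taken from the list above.
const config = {
    host: process.env.MCP_HOST ?? '0.0.0.0',
    port: Number(process.env.MCP_PORT ?? 8123),
    logLevel: process.env.MCP_LOG_LEVEL ?? 'INFO',
    configDir: process.env.MCP_CONFIG_DIR ?? './config' // assumed fallback
};
```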
## Next Steps
- See [System Configuration](../configuration.md) for detailed configuration options
- Check [Environment Setup](../getting-started/configuration.md) for initial setup
- Review [Security](../security.md) for security-related configurations

View File

@@ -1,429 +0,0 @@
# System Configuration
This document provides detailed information about configuring the Home Assistant MCP Server.
## Environment File Structure
The MCP Server uses a flexible environment configuration system with support for different environments and local overrides:
### Environment Files
1. `.env.example` - Template file containing all available configuration options with example values
- Use this as a reference to create your environment-specific configuration files
- Not loaded by the application
2. Environment-specific files (loaded based on NODE_ENV):
- `.env.dev` - Development environment (default)
- `.env.test` - Test environment
- `.env.prod` - Production environment
3. `.env` - Optional local override file
- If present, values in this file override those from the environment-specific file
- Useful for local development without modifying the environment-specific files
### File Loading Order
1. First, the environment-specific file is loaded based on NODE_ENV:
- `NODE_ENV=production` → `.env.prod`
- `NODE_ENV=development` → `.env.dev` (default)
- `NODE_ENV=test` → `.env.test`
2. Then, if a `.env` file exists, its values override any previously loaded values
Example setup:
```bash
# .env.dev - Development configuration
PORT=4000
HASS_HOST=http://homeassistant.local:8123
LOG_LEVEL=debug
# .env - Local overrides
PORT=3000 # Overrides PORT from .env.dev
HASS_HOST=http://localhost:8123 # Overrides HASS_HOST from .env.dev
```
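A minimal sketch of that loading order using Bun's file utilities (illustrative; the project's actual loader and its parsing rules may differ):
```typescript
// Load .env.<env> first, then let a local .env override it (simplified parser).
async function loadEnv(nodeEnv = process.env.NODE_ENV ?? 'development') {
    const suffixes: Record<string, string> = { production: 'prod', test: 'test', development: 'dev' };
    const files = [`.env.${suffixes[nodeEnv] ?? 'dev'}`, '.env'];
    for (const path of files) {
        const file = Bun.file(path);
        if (!(await file.exists())) continue;
        for (const line of (await file.text()).split('\n')) {
            const match = line.match(/^([A-Za-z_][A-Za-z0-9_]*)=(.*)$/);
            if (match) process.env[match[1]] = match[2]; // later files win
        }
    }
}
```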
## Configuration File Structure
The MCP Server uses environment variables for configuration, with support for different environments (development, test, production):
```bash
# .env, .env.development, or .env.test
PORT=4000
NODE_ENV=development
HASS_HOST=http://192.168.178.63:8123
HASS_TOKEN=your_token_here
JWT_SECRET=your_secret_key
```
## Server Settings
### Basic Server Configuration
- `PORT`: Server port number (default: 4000)
- `NODE_ENV`: Environment mode (development, production, test)
- `HASS_HOST`: Home Assistant instance URL
- `HASS_TOKEN`: Home Assistant long-lived access token
### Security Settings
- `JWT_SECRET`: Secret key for JWT token generation
- `RATE_LIMIT`: Rate limiting configuration
- `windowMs`: Time window in milliseconds (default: 15 minutes)
- `max`: Maximum requests per window (default: 100)
### WebSocket Settings
- `SSE`: Server-Sent Events configuration
- `MAX_CLIENTS`: Maximum concurrent clients (default: 1000)
- `PING_INTERVAL`: Keep-alive ping interval in ms (default: 30000)
### Speech Features (Optional)
- `ENABLE_SPEECH_FEATURES`: Enable speech processing features (default: false)
- `ENABLE_WAKE_WORD`: Enable wake word detection (default: false)
- `ENABLE_SPEECH_TO_TEXT`: Enable speech-to-text conversion (default: false)
- `WHISPER_MODEL_PATH`: Path to Whisper models directory (default: /models)
- `WHISPER_MODEL_TYPE`: Whisper model type (default: base)
- Available models: tiny.en, base.en, small.en, medium.en, large-v2
## Environment Variables
All configuration is managed through environment variables:
```bash
# Server
PORT=4000
NODE_ENV=development
# Home Assistant
HASS_HOST=http://your-hass-instance:8123
HASS_TOKEN=your_token_here
# Security
JWT_SECRET=your-secret-key
# Logging
LOG_LEVEL=info
LOG_DIR=logs
LOG_MAX_SIZE=20m
LOG_MAX_DAYS=14d
LOG_COMPRESS=true
LOG_REQUESTS=true
# Speech Features (Optional)
ENABLE_SPEECH_FEATURES=false
ENABLE_WAKE_WORD=false
ENABLE_SPEECH_TO_TEXT=false
WHISPER_MODEL_PATH=/models
WHISPER_MODEL_TYPE=base
```
## Advanced Configuration
### Security Rate Limiting
Rate limiting is enabled by default to protect against brute force attacks:
```typescript
RATE_LIMIT: {
windowMs: 15 * 60 * 1000, // 15 minutes
max: 100 // limit each IP to 100 requests per window
}
```
### Logging
The server uses Bun's built-in logging capabilities with additional configuration:
```typescript
LOGGING: {
LEVEL: "info", // debug, info, warn, error
DIR: "logs",
MAX_SIZE: "20m",
MAX_DAYS: "14d",
COMPRESS: true,
TIMESTAMP_FORMAT: "YYYY-MM-DD HH:mm:ss:ms",
LOG_REQUESTS: true
}
```
### Speech-to-Text Configuration
When speech features are enabled, you can configure the following options:
```typescript
SPEECH: {
ENABLED: false, // Master switch for all speech features
WAKE_WORD_ENABLED: false, // Enable wake word detection
SPEECH_TO_TEXT_ENABLED: false, // Enable speech-to-text
WHISPER_MODEL_PATH: "/models", // Path to Whisper models
WHISPER_MODEL_TYPE: "base", // Model type to use
}
```
Available Whisper models:
- `tiny.en`: Fastest, lowest accuracy
- `base.en`: Good balance of speed and accuracy
- `small.en`: Better accuracy, slower
- `medium.en`: High accuracy, much slower
- `large-v2`: Best accuracy, very slow
For production deployments, we recommend using system tools like `logrotate` for log management.
Example logrotate configuration (`/etc/logrotate.d/mcp-server`):
```
/var/log/mcp-server.log {
daily
rotate 7
compress
delaycompress
missingok
notifempty
create 644 mcp mcp
}
```
## Best Practices
1. Always use environment variables for sensitive information
2. Keep .env files secure and never commit them to version control
3. Use different environment files for development, test, and production
4. Enable SSL/TLS in production (preferably via reverse proxy)
5. Monitor log files for issues
6. Regularly rotate logs in production
7. Start with smaller Whisper models and upgrade if needed
8. Consider GPU acceleration for larger Whisper models
## Validation
The server validates configuration on startup using Zod schemas:
- Required fields are checked (e.g., HASS_TOKEN)
- Value types are verified
- Enums are validated (e.g., LOG_LEVEL, WHISPER_MODEL_TYPE)
- Default values are applied when not specified
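As an illustration of what such a schema might look like (hypothetical; the actual schema in the codebase may differ):
```typescript
import { z } from 'zod';

// Hypothetical configuration schema mirroring the checks listed above.
const ConfigSchema = z.object({
    HASS_TOKEN: z.string().min(1),                                   // required field
    PORT: z.coerce.number().default(4000),                           // type check + default
    LOG_LEVEL: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
    WHISPER_MODEL_TYPE: z.enum(['tiny.en', 'base.en', 'small.en', 'medium.en', 'large-v2'])
        .default('base.en')
});

// Throws with a detailed error report when a value is missing or invalid.
const config = ConfigSchema.parse(process.env);
```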
## Troubleshooting
Common configuration issues:
1. Missing required environment variables
2. Invalid environment variable values
3. Permission issues with log directories
4. Rate limiting too restrictive
5. Speech model loading failures
6. Docker not available for speech features
7. Insufficient system resources for larger models
See the [Troubleshooting Guide](troubleshooting.md) for solutions.
# Configuration Guide
This document describes the environment configuration system for the Home Assistant MCP Server.
## Environment Setup
### Using the Setup Script
The MCP Server provides a setup script to help manage your environment configuration:
```bash
# Make the script executable
chmod +x scripts/setup-env.sh
# Basic usage (uses NODE_ENV or defaults to development)
./scripts/setup-env.sh
# Specify an environment
NODE_ENV=production ./scripts/setup-env.sh
# Force override existing files
./scripts/setup-env.sh --force
```
The setup script will:
1. Check for `.env.example` and create `.env` if it doesn't exist
2. Detect the environment (development/production/test)
3. Optionally override `.env` with environment-specific settings
4. Maintain your existing configuration unless forced to override
### Manual Setup
If you prefer to set up manually:
```bash
# Copy the example configuration
cp .env.example .env
# Then copy the appropriate environment override
cp .env.dev .env # For development
cp .env.prod .env # For production
cp .env.test .env # For testing
```
## Environment File Hierarchy
### Base Configuration Files
- `.env.example` - Template with all available options and documentation
- `.env` - Your main configuration file (copied from .env.example)
### Environment-Specific Files
- `.env.dev` - Development environment settings
- `.env.prod` - Production environment settings
- `.env.test` - Test environment settings
### Loading Order and Priority
Files are loaded in the following sequence, with later files overriding earlier ones:
1. `.env` (base configuration)
2. Environment-specific file based on NODE_ENV:
- `NODE_ENV=development` → `.env.dev`
- `NODE_ENV=production` → `.env.prod`
- `NODE_ENV=test` → `.env.test`
### Docker Environment Handling
When using Docker, the environment is loaded as follows:
1. `.env` file (base configuration)
2. `.env.${NODE_ENV}` file (environment-specific overrides)
3. Environment variables from docker-compose.yml
4. Command-line environment variables
Example docker-compose.yml configuration:
```yaml
services:
homeassistant-mcp:
env_file:
- .env
- .env.${NODE_ENV:-development}
environment:
- NODE_ENV=${NODE_ENV:-development}
- PORT=4000
- HASS_HOST
- HASS_TOKEN
- LOG_LEVEL=${LOG_LEVEL:-info}
```
Override examples:
```bash
# Override NODE_ENV
NODE_ENV=production docker compose up -d
# Override multiple variables
NODE_ENV=production LOG_LEVEL=debug docker compose up -d
```
## Configuration Options
### Required Settings
```bash
# Server Configuration
PORT=4000 # Server port number
NODE_ENV=development # Environment (development/production/test)
# Home Assistant
HASS_HOST=http://homeassistant.local:8123 # Home Assistant URL
HASS_TOKEN=your_token_here # Long-lived access token
# Security
JWT_SECRET=your_secret_key # JWT signing secret
```
### Optional Settings
#### Security
```bash
# Rate Limiting
RATE_LIMIT_WINDOW=900000 # Time window in ms (15 minutes)
RATE_LIMIT_MAX_REQUESTS=100 # Max requests per window
RATE_LIMIT_REGULAR=100 # Regular endpoint rate limit
RATE_LIMIT_WEBSOCKET=1000 # WebSocket connection rate limit
# CORS Configuration
CORS_ORIGINS=http://localhost:3000,http://localhost:8123
CORS_METHODS=GET,POST,PUT,DELETE,OPTIONS
CORS_ALLOWED_HEADERS=Content-Type,Authorization,X-Requested-With
CORS_EXPOSED_HEADERS=
CORS_CREDENTIALS=true
CORS_MAX_AGE=86400
# Cookie Security
COOKIE_SECRET=your_cookie_secret_key_min_32_chars
COOKIE_SECURE=true
COOKIE_HTTP_ONLY=true
COOKIE_SAME_SITE=Strict
```
#### Logging
```bash
# Logging Configuration
LOG_LEVEL=info # debug, info, warn, error
LOG_DIR=logs # Log directory
LOG_MAX_SIZE=20m # Max log file size
LOG_MAX_DAYS=14d # Log retention period
LOG_COMPRESS=true # Enable log compression
LOG_REQUESTS=true # Log HTTP requests
```
#### Speech Features
```bash
# Speech Processing
ENABLE_SPEECH_FEATURES=false # Master switch for speech features
ENABLE_WAKE_WORD=false # Enable wake word detection
ENABLE_SPEECH_TO_TEXT=false # Enable speech-to-text
WHISPER_MODEL_PATH=/models # Path to Whisper models
WHISPER_MODEL_TYPE=base # Whisper model type
# Audio Configuration
NOISE_THRESHOLD=0.05
MIN_SPEECH_DURATION=1.0
SILENCE_DURATION=0.5
SAMPLE_RATE=16000
CHANNELS=1
CHUNK_SIZE=1024
PULSE_SERVER=unix:/run/user/1000/pulse/native
```
## Best Practices
1. **Version Control**
- Never commit `.env` files to version control
- Always commit `.env.example` with documentation
- Consider committing `.env.dev` and `.env.test` for team development
2. **Security**
- Use strong, unique values for secrets
- Enable HTTPS in production
- Keep tokens and secrets in `.env` only
3. **Development**
- Use `.env.dev` for shared development settings
- Keep `.env` for personal overrides
- Enable debug logging in development
4. **Production**
- Use `.env.prod` for production defaults
- Set appropriate rate limits
- Configure proper logging
- Enable all security features
5. **Testing**
- Use `.env.test` for test configuration
- Use mock tokens and endpoints
- Enable detailed logging for debugging
## Troubleshooting
### Common Issues
1. **Missing Required Variables**
- Error: "Missing required environment variable: HASS_TOKEN"
- Solution: Ensure HASS_TOKEN is set in your .env file
2. **Permission Issues**
- Error: "EACCES: permission denied, access '/app/logs'"
- Solution: Ensure proper permissions on the logs directory
3. **Invalid Configuration**
- Error: "Invalid configuration value for PORT"
- Solution: Check the value format in your .env file
4. **Environment Override Issues**
- Problem: Environment-specific settings not applying
- Solution: Check NODE_ENV value and file naming
See [Troubleshooting Guide](troubleshooting.md) for more solutions.

View File

@@ -1,124 +0,0 @@
---
layout: default
title: Contributing
nav_order: 5
---
# Contributing Guide 🤝
Thank you for your interest in contributing to the MCP Server project!
## Getting Started
### Prerequisites
- [Bun](https://bun.sh) >= 1.0.26
- Home Assistant instance
- Basic understanding of TypeScript
### Development Setup
1. Fork the repository
2. Clone your fork:
```bash
git clone https://github.com/YOUR_USERNAME/homeassistant-mcp.git
cd homeassistant-mcp
```
3. Install dependencies:
```bash
bun install
```
4. Configure environment:
```bash
cp .env.example .env
# Edit .env with your Home Assistant details
```
## Development Workflow
### Branch Naming
- `feature/` - New features
- `fix/` - Bug fixes
- `docs/` - Documentation updates
Example:
```bash
git checkout -b feature/device-control-improvements
```
### Commit Messages
Follow simple, clear commit messages:
```
type: brief description
[optional detailed explanation]
```
Types:
- `feat:` - New feature
- `fix:` - Bug fix
- `docs:` - Documentation
- `chore:` - Maintenance
### Code Style
- Use TypeScript
- Follow existing code structure
- Keep changes focused and minimal
## Testing
Run tests before submitting:
```bash
# Run all tests
bun test
# Run specific test
bun test test/api/control.test.ts
```
## Pull Request Process
1. Ensure tests pass
2. Update documentation if needed
3. Provide clear description of changes
### PR Template
```markdown
## Description
Brief explanation of the changes
## Type of Change
- [ ] Bug fix
- [ ] New feature
- [ ] Documentation update
## Testing
Describe how you tested these changes
```
## Reporting Issues
- Use GitHub Issues
- Provide clear, reproducible steps
- Include environment details
## Code of Conduct
- Be respectful
- Focus on constructive feedback
- Help maintain a positive environment
## Resources
- [API Documentation](api.md)
- [Troubleshooting Guide](troubleshooting.md)
*Thank you for contributing!*

View File

@@ -1,141 +0,0 @@
# Deployment Guide
This documentation is automatically deployed to GitHub Pages using GitHub Actions. Here's how it works and how to manage deployments.
## Automatic Deployment
The documentation is automatically deployed when changes are pushed to the `main` or `master` branch. The deployment process:
1. Triggers on push to main/master
2. Sets up Python environment
3. Installs required dependencies
4. Builds the documentation
5. Deploys to the `gh-pages` branch
### GitHub Actions Workflow
The deployment is handled by the workflow in `.github/workflows/deploy-docs.yml`. This is the single source of truth for documentation deployment:
```yaml
name: Deploy MkDocs
on:
push:
branches:
- main
- master
workflow_dispatch: # Allow manual trigger
```
## Manual Deployment
If needed, you can deploy manually using:
```bash
# Create a virtual environment
python -m venv venv
# Activate the virtual environment
source venv/bin/activate
# Install dependencies
pip install -r docs/requirements.txt
# Build the documentation
mkdocs build
# Deploy to GitHub Pages
mkdocs gh-deploy --force
```
## Best Practices
### 1. Documentation Updates
- Test locally before pushing: `mkdocs serve`
- Verify all links work
- Ensure images are optimized
- Check mobile responsiveness
### 2. Version Control
- Keep documentation in sync with code versions
- Use meaningful commit messages
- Tag important documentation versions
### 3. Content Guidelines
- Use consistent formatting
- Keep navigation structure logical
- Include examples where appropriate
- Maintain up-to-date screenshots
### 4. Maintenance
- Regularly review and update content
- Check for broken links
- Update dependencies
- Monitor GitHub Actions logs
## Troubleshooting
### Common Issues
1. **Failed Deployments**
- Check GitHub Actions logs
- Verify dependencies are up to date
- Ensure all required files exist
2. **Broken Links**
- Run `mkdocs build --strict`
- Use relative paths in markdown
- Check case sensitivity
3. **Style Issues**
- Verify theme configuration
- Check CSS customizations
- Test on multiple browsers
## Configuration Files
### requirements.txt
Create a requirements file for documentation dependencies:
```txt
mkdocs-material
mkdocs-minify-plugin
mkdocs-git-revision-date-plugin
mkdocs-mkdocstrings
mkdocs-social-plugin
mkdocs-redirects
```
## Monitoring
- Check [GitHub Pages settings](https://github.com/jango-blockchained/advanced-homeassistant-mcp/settings/pages)
- Monitor build status in Actions tab
- Verify site accessibility
## Workflow Features
### Caching
The workflow implements caching for Python dependencies to speed up deployments:
- Pip cache for Python packages
- MkDocs dependencies cache
### Deployment Checks
Several checks are performed during deployment:
1. Link validation with `mkdocs build --strict`
2. Build verification
3. Post-deployment site accessibility check
### Manual Triggers
You can manually trigger deployments using the "workflow_dispatch" event in GitHub Actions.
## Cleanup
To clean up duplicate workflow files, run:
```bash
# Make the script executable
chmod +x scripts/cleanup-workflows.sh
# Run the cleanup script
./scripts/cleanup-workflows.sh
```

View File

@@ -1,310 +0,0 @@
# Development Best Practices
This guide outlines the best practices for developing tools and features for the Home Assistant MCP.
## Code Style
### TypeScript
1. Use TypeScript for all new code
2. Enable strict mode
3. Use explicit types
4. Avoid `any` type
5. Use interfaces over types
6. Document with JSDoc comments
```typescript
/**
* Represents a device in the system.
* @interface
*/
interface Device {
/** Unique device identifier */
id: string;
/** Human-readable device name */
name: string;
/** Device state */
state: DeviceState;
}
```
### Naming Conventions
1. Use PascalCase for:
- Classes
- Interfaces
- Types
- Enums
2. Use camelCase for:
- Variables
- Functions
- Methods
- Properties
3. Use UPPER_SNAKE_CASE for:
- Constants
- Enum values
```typescript
class DeviceManager {
private readonly DEFAULT_TIMEOUT = 5000;
async getDeviceState(deviceId: string): Promise<DeviceState> {
// Implementation
}
}
```
## Architecture
### SOLID Principles
1. Single Responsibility
- Each class/module has one job
- Split complex functionality
2. Open/Closed
- Open for extension
- Closed for modification
3. Liskov Substitution
- Subtypes must be substitutable
- Use interfaces properly
4. Interface Segregation
- Keep interfaces focused
- Split large interfaces
5. Dependency Inversion
- Depend on abstractions
- Use dependency injection
### Example
```typescript
// Bad
class DeviceManager {
async getState() { /* ... */ }
async setState() { /* ... */ }
async sendNotification() { /* ... */ } // Wrong responsibility
}
// Good
class DeviceManager {
constructor(
private notifier: NotificationService
) {}
async getState() { /* ... */ }
async setState() { /* ... */ }
}
class NotificationService {
async send() { /* ... */ }
}
```
## Error Handling
### Best Practices
1. Use custom error classes
2. Include error codes
3. Provide meaningful messages
4. Include error context
5. Handle async errors
6. Log appropriately
```typescript
class DeviceError extends Error {
constructor(
message: string,
public code: string,
public context: Record<string, any>
) {
super(message);
this.name = 'DeviceError';
}
}
try {
await device.connect();
} catch (error) {
throw new DeviceError(
'Failed to connect to device',
'DEVICE_CONNECTION_ERROR',
{ deviceId: device.id, attempt: 1 }
);
}
```
## Testing
### Guidelines
1. Write unit tests first
2. Use meaningful descriptions
3. Test edge cases
4. Mock external dependencies
5. Keep tests focused
6. Use test fixtures
```typescript
describe('DeviceManager', () => {
let manager: DeviceManager;
let mockDevice: jest.Mocked<Device>;
beforeEach(() => {
mockDevice = {
id: 'test_device',
getState: jest.fn()
} as unknown as jest.Mocked<Device>;
manager = new DeviceManager(mockDevice);
});
it('should get device state', async () => {
mockDevice.getState.mockResolvedValue('on');
const state = await manager.getDeviceState('test_device');
expect(state).toBe('on');
});
});
```
## Performance
### Optimization
1. Use caching
2. Implement pagination
3. Optimize database queries
4. Use connection pooling
5. Implement rate limiting
6. Batch operations
```typescript
class DeviceCache {
private cache = new Map<string, CacheEntry>();
private readonly TTL = 60000; // 1 minute
async getDevice(id: string): Promise<Device> {
const cached = this.cache.get(id);
if (cached && Date.now() - cached.timestamp < this.TTL) {
return cached.device;
}
const device = await this.fetchDevice(id);
this.cache.set(id, {
device,
timestamp: Date.now()
});
return device;
}
}
```
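The cache above illustrates item 1 of the list; the following is a minimal sketch of item 2 (pagination), reusing the `Device` interface from earlier. The `Page` shape and the in-memory `store` are illustrative assumptions, not part of the existing codebase:
```typescript
interface Page<T> {
    /** Items for the current page */
    items: T[];
    /** Total number of items available */
    total: number;
    offset: number;
    limit: number;
}

class DeviceRepository {
    constructor(private store: Device[]) { }

    /** Return one bounded page instead of the full collection. */
    listDevices(offset = 0, limit = 50): Page<Device> {
        const safeLimit = Math.min(Math.max(limit, 1), 100); // cap page size
        const items = this.store.slice(offset, offset + safeLimit);
        return { items, total: this.store.length, offset, limit: safeLimit };
    }
}
```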
## Security
### Guidelines
1. Validate all input
2. Use parameterized queries
3. Implement rate limiting
4. Use proper authentication
5. Follow OWASP guidelines
6. Sanitize output
```typescript
class InputValidator {
static validateDeviceId(id: string): boolean {
return /^[a-zA-Z0-9_-]{1,64}$/.test(id);
}
static sanitizeOutput(data: any): any {
// Implement output sanitization
return data;
}
}
```
## Documentation
### Standards
1. Use JSDoc comments
2. Document interfaces
3. Include examples
4. Document errors
5. Keep docs updated
6. Use markdown
```typescript
/**
* Manages device operations.
* @class
*/
class DeviceManager {
/**
* Gets the current state of a device.
* @param {string} deviceId - The device identifier.
* @returns {Promise<DeviceState>} The current device state.
* @throws {DeviceError} If device is not found or unavailable.
* @example
* const state = await deviceManager.getDeviceState('living_room_light');
*/
async getDeviceState(deviceId: string): Promise<DeviceState> {
// Implementation
}
}
```
## Logging
### Best Practices
1. Use appropriate levels
2. Include context
3. Structure log data
4. Handle sensitive data
5. Implement rotation
6. Use correlation IDs
```typescript
class Logger {
info(message: string, context: Record<string, any>) {
console.log(JSON.stringify({
level: 'info',
message,
context,
timestamp: new Date().toISOString(),
correlationId: context.correlationId
}));
}
}
```
## Version Control
### Guidelines
1. Use meaningful commits
2. Follow branching strategy
3. Write good PR descriptions
4. Review code thoroughly
5. Keep changes focused
6. Use conventional commits
```bash
# Good commit messages
git commit -m "feat(device): add support for zigbee devices"
git commit -m "fix(api): handle timeout errors properly"
```
## See Also
- [Tool Development Guide](tools.md)
- [Interface Documentation](interfaces.md)
- [Testing Guide](../testing.md)

View File

@@ -1,197 +0,0 @@
# Development Environment Setup
This guide will help you set up your development environment for the Home Assistant MCP Server.
## Prerequisites
### Required Software
- Python 3.10 or higher
- pip (Python package manager)
- git
- Docker (optional, for containerized development)
- Node.js 18+ (for frontend development)
### System Requirements
- 4GB RAM minimum
- 2 CPU cores minimum
- 10GB free disk space
## Initial Setup
1. Clone the Repository
```bash
git clone https://github.com/jango-blockchained/homeassistant-mcp.git
cd homeassistant-mcp
```
2. Create Virtual Environment
```bash
python -m venv .venv
source .venv/bin/activate # Linux/macOS
# or
.venv\Scripts\activate # Windows
```
3. Install Dependencies
```bash
pip install -r requirements.txt
pip install -r docs/requirements.txt # for documentation
```
## Development Tools
### Code Editor Setup
We recommend using Visual Studio Code with these extensions:
- Python
- Docker
- YAML
- ESLint
- Prettier
### VS Code Settings
```json
{
"python.linting.enabled": true,
"python.linting.pylintEnabled": true,
"python.formatting.provider": "black",
"editor.formatOnSave": true
}
```
## Configuration
1. Create Local Config
```bash
cp config.example.yaml config.yaml
```
2. Set Environment Variables
```bash
cp .env.example .env
# Edit .env with your settings
```
## Running Tests
### Unit Tests
```bash
pytest tests/unit
```
### Integration Tests
```bash
pytest tests/integration
```
### Coverage Report
```bash
pytest --cov=src tests/
```
## Docker Development
### Build Container
```bash
docker build -t mcp-server-dev -f Dockerfile.dev .
```
### Run Development Container
```bash
docker run -it --rm \
-v $(pwd):/app \
-p 8123:8123 \
mcp-server-dev
```
## Database Setup
### Local Development Database
```bash
docker run -d \
-p 5432:5432 \
-e POSTGRES_USER=mcp \
-e POSTGRES_PASSWORD=development \
-e POSTGRES_DB=mcp_dev \
postgres:14
```
### Run Migrations
```bash
alembic upgrade head
```
## Frontend Development
1. Install Node.js Dependencies
```bash
cd frontend
npm install
```
2. Start Development Server
```bash
npm run dev
```
## Documentation
### Build Documentation
```bash
mkdocs serve
```
### View Documentation
Open http://localhost:8000 in your browser
## Debugging
### VS Code Launch Configuration
```json
{
"version": "0.2.0",
"configurations": [
{
"name": "Python: MCP Server",
"type": "python",
"request": "launch",
"program": "src/main.py",
"console": "integratedTerminal"
}
]
}
```
## Git Hooks
### Install Pre-commit
```bash
pip install pre-commit
pre-commit install
```
### Available Hooks
- black (code formatting)
- flake8 (linting)
- isort (import sorting)
- mypy (type checking)
## Troubleshooting
Common Issues:
1. Port already in use
- Check for running processes: `lsof -i :8123`
- Kill process if needed: `kill -9 PID`
2. Database connection issues
- Verify PostgreSQL is running
- Check connection settings in .env
3. Virtual environment problems
- Delete and recreate: `rm -rf .venv && python -m venv .venv`
- Reinstall dependencies
## Next Steps
1. Review the [Architecture Guide](../architecture.md)
2. Check [Contributing Guidelines](../contributing.md)
3. Start with [Simple Issues](https://github.com/jango-blockchained/homeassistant-mcp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)

View File

@@ -1,54 +0,0 @@
# Development Guide
Welcome to the development guide for the Home Assistant MCP Server. This section provides comprehensive information for developers who want to contribute to or extend the project.
## Development Overview
The MCP Server is built with modern development practices in mind, focusing on:
- Clean, maintainable code
- Comprehensive testing
- Clear documentation
- Modular architecture
## Getting Started
1. Set up your development environment
2. Fork the repository
3. Install dependencies
4. Run tests
5. Make your changes
6. Submit a pull request
## Development Topics
- [Architecture](../architecture.md) - System architecture and design
- [Contributing](../contributing.md) - Contribution guidelines
- [Testing](../testing.md) - Testing framework and guidelines
- [Troubleshooting](../troubleshooting.md) - Common issues and solutions
- [Deployment](../deployment.md) - Deployment procedures
- [Roadmap](../roadmap.md) - Future development plans
## Best Practices
- Follow the coding style guide
- Write comprehensive tests
- Document your changes
- Keep commits atomic
- Use meaningful commit messages
## Development Workflow
1. Create a feature branch
2. Make your changes
3. Run tests
4. Update documentation
5. Submit a pull request
6. Address review comments
7. Merge when approved
## Next Steps
- Review the [Architecture](../architecture.md)
- Check [Contributing Guidelines](../contributing.md)
- Set up your [Development Environment](environment.md)

View File

@@ -1,296 +0,0 @@
# Interface Documentation
This document describes the core interfaces used throughout the Home Assistant MCP.
## Core Interfaces
### Tool Interface
```typescript
interface Tool {
/** Unique identifier for the tool */
id: string;
/** Human-readable name */
name: string;
/** Detailed description */
description: string;
/** Semantic version */
version: string;
/** Tool category */
category: ToolCategory;
/** Execute tool functionality */
execute(params: any): Promise<ToolResult>;
}
```
### Tool Result
```typescript
interface ToolResult {
/** Operation success status */
success: boolean;
/** Response data */
data?: any;
/** Error message if failed */
message?: string;
/** Error code if failed */
error_code?: string;
}
```
### Tool Category
```typescript
enum ToolCategory {
DeviceManagement = 'device_management',
HistoryState = 'history_state',
Automation = 'automation',
AddonsPackages = 'addons_packages',
Notifications = 'notifications',
Events = 'events',
Utility = 'utility'
}
```
## Event Interfaces
### Event Subscription
```typescript
interface EventSubscription {
/** Unique subscription ID */
id: string;
/** Event type to subscribe to */
event_type: string;
/** Optional entity ID filter */
entity_id?: string;
/** Optional domain filter */
domain?: string;
/** Subscription creation timestamp */
created_at: string;
/** Last event timestamp */
last_event?: string;
}
```
### Event Message
```typescript
interface EventMessage {
/** Event type */
event_type: string;
/** Entity ID if applicable */
entity_id?: string;
/** Event data */
data: any;
/** Event origin */
origin: 'LOCAL' | 'REMOTE';
/** Event timestamp */
time_fired: string;
/** Event context */
context: EventContext;
}
```
## Device Interfaces
### Device
```typescript
interface Device {
/** Device ID */
id: string;
/** Device name */
name: string;
/** Device domain */
domain: string;
/** Current state */
state: string;
/** Device attributes */
attributes: Record<string, any>;
/** Device capabilities */
capabilities: DeviceCapabilities;
}
```
### Device Capabilities
```typescript
interface DeviceCapabilities {
/** Supported features */
features: string[];
/** Supported commands */
commands: string[];
/** State attributes */
attributes: {
/** Attribute name */
[key: string]: {
/** Attribute type */
type: 'string' | 'number' | 'boolean' | 'object';
/** Attribute description */
description: string;
/** Optional value constraints */
constraints?: {
min?: number;
max?: number;
enum?: any[];
};
};
};
}
```
## Authentication Interfaces
### Auth Token
```typescript
interface AuthToken {
/** Token value */
token: string;
/** Token type */
type: 'bearer' | 'jwt';
/** Expiration timestamp */
expires_at: string;
/** Token refresh info */
refresh?: {
token: string;
expires_at: string;
};
}
```
### User
```typescript
interface User {
/** User ID */
id: string;
/** Username */
username: string;
/** User type */
type: 'admin' | 'user' | 'service';
/** User permissions */
permissions: string[];
}
```
## Error Interfaces
### Tool Error
```typescript
interface ToolError extends Error {
/** Error code */
code: string;
/** HTTP status code */
status: number;
/** Error details */
details?: Record<string, any>;
}
```
### Validation Error
```typescript
interface ValidationError {
/** Error path */
path: string;
/** Error message */
message: string;
/** Error code */
code: string;
}
```
## Configuration Interfaces
### Tool Configuration
```typescript
interface ToolConfig {
/** Enable/disable tool */
enabled: boolean;
/** Tool-specific settings */
settings: Record<string, any>;
/** Rate limiting */
rate_limit?: {
/** Max requests */
max: number;
/** Time window in seconds */
window: number;
};
}
```
### System Configuration
```typescript
interface SystemConfig {
/** System name */
name: string;
/** Environment */
environment: 'development' | 'production';
/** Log level */
log_level: 'debug' | 'info' | 'warn' | 'error';
/** Tool configurations */
tools: Record<string, ToolConfig>;
}
```
## Best Practices
1. Use TypeScript for all interfaces
2. Include JSDoc comments
3. Use strict typing
4. Keep interfaces focused
5. Use consistent naming
6. Document constraints
7. Version interfaces
8. Include examples
## See Also
- [Tool Development Guide](tools.md)
- [Best Practices](best-practices.md)
- [Testing Guide](../testing.md)

View File

@@ -1,226 +0,0 @@
# Tool Development Guide
This guide explains how to create new tools for the Home Assistant MCP.
## Tool Structure
Each tool should follow this basic structure:
```typescript
interface Tool {
id: string;
name: string;
description: string;
version: string;
category: ToolCategory;
execute(params: any): Promise<ToolResult>;
}
```
## Creating a New Tool
1. Create a new file in the appropriate category directory
2. Implement the Tool interface
3. Add API endpoints
4. Add WebSocket handlers
5. Add documentation
6. Add tests
### Example Tool Implementation
```typescript
import { Tool, ToolCategory, ToolResult } from '../interfaces';
export class MyCustomTool implements Tool {
id = 'my_custom_tool';
name = 'My Custom Tool';
description = 'Description of what the tool does';
version = '1.0.0';
category = ToolCategory.Utility;
async execute(params: any): Promise<ToolResult> {
// Tool implementation
return {
success: true,
data: {
// Tool-specific response data
}
};
}
}
```
## Tool Categories
- Device Management
- History & State
- Automation
- Add-ons & Packages
- Notifications
- Events
- Utility
## API Integration
### REST Endpoint
```typescript
import { Router } from 'express';
import { MyCustomTool } from './my-custom-tool';
const router = Router();
const tool = new MyCustomTool();
router.post('/api/tools/custom', async (req, res) => {
try {
const result = await tool.execute(req.body);
res.json(result);
} catch (error) {
res.status(500).json({
success: false,
message: error.message
});
}
});
```
### WebSocket Handler
```typescript
import { WebSocketServer } from 'ws';
import { MyCustomTool } from './my-custom-tool';
const tool = new MyCustomTool();
wss.on('connection', (ws) => {
ws.on('message', async (message) => {
const { type, params } = JSON.parse(message);
if (type === 'my_custom_tool') {
const result = await tool.execute(params);
ws.send(JSON.stringify(result));
}
});
});
```
## Error Handling
```typescript
class ToolError extends Error {
constructor(
message: string,
public code: string,
public status: number = 500
) {
super(message);
this.name = 'ToolError';
}
}
// Usage in tool
async execute(params: any): Promise<ToolResult> {
try {
// Tool implementation
} catch (error) {
throw new ToolError(
'Operation failed',
'TOOL_ERROR',
500
);
}
}
```
## Testing
```typescript
import { MyCustomTool } from './my-custom-tool';
describe('MyCustomTool', () => {
let tool: MyCustomTool;
beforeEach(() => {
tool = new MyCustomTool();
});
it('should execute successfully', async () => {
const result = await tool.execute({
// Test parameters
});
expect(result.success).toBe(true);
});
it('should handle errors', async () => {
// Error test cases
});
});
```
## Documentation
1. Create tool documentation in `docs/tools/category/tool-name.md`
2. Update `tools/tools.md` with tool reference
3. Add tool to navigation in `mkdocs.yml`
### Documentation Template
````markdown
# Tool Name
Description of the tool.
## Features
- Feature 1
- Feature 2
## Usage
### REST API
```typescript
// API endpoints
```
### WebSocket
```typescript
// WebSocket usage
```
## Examples
### Example 1
```typescript
// Usage example
```
## Response Format
```json
{
"success": true,
"data": {
// Response data structure
}
}
```
````
## Best Practices
1. Follow consistent naming conventions
2. Implement proper error handling
3. Add comprehensive documentation
4. Write thorough tests
5. Use TypeScript for type safety
6. Follow SOLID principles
7. Implement rate limiting (see the sketch below)
8. Add proper logging
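A minimal sketch of item 7, keeping a per-tool sliding window in memory. The `{ max, window }` shape mirrors the `rate_limit` configuration described in the interface documentation; the `Map`-based storage is an assumption for illustration:
```typescript
// Sketch: a per-tool sliding-window rate limiter kept in memory.
class RateLimiter {
    private hits = new Map<string, number[]>();

    constructor(private max: number, private windowSeconds: number) { }

    allow(toolId: string): boolean {
        const now = Date.now();
        const windowStart = now - this.windowSeconds * 1000;
        // Keep only calls that fall inside the current window
        const recent = (this.hits.get(toolId) ?? []).filter(t => t > windowStart);
        if (recent.length >= this.max) {
            this.hits.set(toolId, recent);
            return false;
        }
        recent.push(now);
        this.hits.set(toolId, recent);
        return true;
    }
}

// Usage before executing a tool:
// if (!limiter.allow(tool.id)) throw new ToolError('Rate limit exceeded', 'RATE_LIMITED', 429);
```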
## See Also
- [Interface Documentation](interfaces.md)
- [Best Practices](best-practices.md)
- [Testing Guide](../testing.md)

View File

@@ -1,22 +0,0 @@
---
layout: default
title: Examples
nav_order: 7
has_children: true
---
# Example Projects 📚
This section contains examples and tutorials for common MCP Server integrations.
## Speech-to-Text Integration
Example of integrating speech recognition with MCP Server:
```typescript
// From examples/speech-to-text-example.ts
// Add example code and explanation
```
## More Examples Coming Soon
...

View File

@@ -1,228 +0,0 @@
# Extras & Tools Guide 🛠️
## Overview
I've included several additional tools and utilities in the `extra/` directory to enhance your Home Assistant MCP experience. These tools help with automation analysis, speech processing, and client integration.
## Available Tools 🧰
### 1. Home Assistant Analyzer CLI
```bash
# Installation
bun install -g @homeassistant-mcp/ha-analyzer-cli
# Usage
ha-analyzer analyze path/to/automation.yaml
```
Features:
- 🔍 Deep automation analysis using AI models
- 🚨 Security vulnerability scanning
- 💡 Performance optimization suggestions
- 📊 System health metrics
- ⚡ Energy usage analysis
- 🤖 Automation improvement recommendations
### 2. Speech-to-Text Example
```bash
# Run the example
bun run extra/speech-to-text-example.ts
```
Features:
- 🎤 Wake word detection ("hey jarvis", "ok google", "alexa")
- 🗣️ Speech-to-text transcription
- 🌍 Multiple language support
- 🚀 GPU acceleration support
- 📝 Event handling and logging
### 3. Claude Desktop Setup (macOS)
```bash
# Make script executable
chmod +x extra/claude-desktop-macos-setup.sh
# Run setup
./extra/claude-desktop-macos-setup.sh
```
Features:
- 🖥️ Automated Claude Desktop installation
- ⚙️ Environment configuration
- 🔗 MCP integration setup
- 🚀 Performance optimization
## Home Assistant Analyzer Details 📊
### Analysis Categories
1. **System Overview**
- Current state assessment
- Health check
- Configuration review
- Integration status
- Issue detection
2. **Performance Analysis**
- Resource usage monitoring
- Response time analysis
- Optimization opportunities
- Bottleneck detection
3. **Security Assessment**
- Current security measures
- Vulnerability detection
- Security recommendations
- Best practices review
4. **Optimization Suggestions**
- Performance improvements
- Configuration optimizations
- Integration enhancements
- Automation opportunities
5. **Maintenance Tasks**
- Required updates
- Cleanup recommendations
- Regular maintenance tasks
- System health checks
6. **Entity Usage Analysis**
- Most active entities
- Rarely used entities
- Potential duplicates
- Usage patterns
7. **Automation Analysis**
- Inefficient automations
- Improvement suggestions
- Blueprint recommendations
- Condition optimizations
8. **Energy Management**
- High consumption detection
- Monitoring suggestions
- Tariff optimization
- Usage patterns
### Configuration
```yaml
# config/analyzer.yaml
analysis:
depth: detailed # quick, basic, or detailed
models: # AI models to use
- gpt-4 # for complex analysis
- gpt-3.5-turbo # for quick checks
focus: # Analysis focus areas
- security
- performance
- automations
- energy
ignore: # Paths to ignore
- test/
- disabled/
```
## Speech-to-Text Integration 🎤
### Prerequisites
1. Docker installed and running
2. NVIDIA GPU with CUDA (optional, for faster processing)
3. Audio input device configured
### Configuration
```yaml
# speech-config.yaml
wake_word:
enabled: true
words:
- "hey jarvis"
- "ok google"
- "alexa"
sensitivity: 0.5
speech_to_text:
model: "base" # tiny, base, small, medium, large
language: "en" # en, es, fr, etc.
use_gpu: true # Enable GPU acceleration
```
### Usage Example
```typescript
import { SpeechProcessor } from './speech-to-text-example';
const processor = new SpeechProcessor({
wakeWord: true,
model: 'base',
language: 'en'
});
processor.on('wake_word', (timestamp) => {
console.log('Wake word detected!');
});
processor.on('transcription', (text) => {
console.log('Transcribed:', text);
});
await processor.start();
```
## Best Practices 🎯
1. **Analysis Tool Usage**
- Run regular system analyses
- Focus on specific areas when needed
- Review and implement suggestions
- Monitor improvements
2. **Speech Processing**
- Choose appropriate models
- Test in your environment
- Adjust sensitivity as needed
- Monitor performance
3. **Integration Setup**
- Follow security best practices
- Test in development first
- Monitor resource usage
- Keep configurations updated
## Troubleshooting 🔧
### Common Issues
1. **Analyzer CLI Issues**
- Verify API keys
- Check network connectivity
- Validate YAML syntax
- Review permissions
2. **Speech Processing Issues**
- Check audio device
- Verify Docker setup
- Monitor GPU usage
- Check model compatibility
3. **Integration Issues**
- Verify configurations
- Check dependencies
- Review logs
- Test connectivity
## API Reference 🔌
### Analyzer API
```typescript
import { HomeAssistantAnalyzer } from './ha-analyzer-cli';
const analyzer = new HomeAssistantAnalyzer({
depth: 'detailed',
focus: ['security', 'performance']
});
const analysis = await analyzer.analyze();
console.log(analysis.suggestions);
```
See [API Documentation](api.md) for more details.

View File

@@ -1,212 +0,0 @@
# Speech Features
The Home Assistant MCP Server includes powerful speech processing capabilities powered by fast-whisper and custom wake word detection. This guide explains how to set up and use these features effectively.
## Overview
The speech processing system consists of two main components:
1. Wake Word Detection - Listens for specific trigger phrases
2. Speech-to-Text - Transcribes spoken commands using fast-whisper
## Setup
### Prerequisites
1. Docker environment:
```bash
docker --version # Should be 20.10.0 or higher
```
2. For GPU acceleration:
- NVIDIA GPU with CUDA support
- NVIDIA Container Toolkit installed
- NVIDIA drivers 450.80.02 or higher
### Installation
1. Enable speech features in your `.env`:
```bash
ENABLE_SPEECH_FEATURES=true
ENABLE_WAKE_WORD=true
ENABLE_SPEECH_TO_TEXT=true
```
2. Configure model settings:
```bash
WHISPER_MODEL_PATH=/models
WHISPER_MODEL_TYPE=base
WHISPER_LANGUAGE=en
WHISPER_TASK=transcribe
WHISPER_DEVICE=cuda # or cpu
```
3. Start the services:
```bash
docker-compose up -d
```
## Usage
### Wake Word Detection
The wake word detector continuously listens for configured trigger phrases. Default wake words:
- "hey jarvis"
- "ok google"
- "alexa"
Custom wake words can be configured:
```bash
WAKE_WORDS=computer,jarvis,assistant
```
When a wake word is detected (see the sketch below):
1. The system starts recording audio
2. Audio is processed through the speech-to-text pipeline
3. The resulting command is processed by the server
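The flow above could be wired together roughly as follows, using the `SpeechService` events shown later in this guide. The `executeCommand()` dispatcher is an illustrative assumption; it simply forwards the transcription to the `/api/command` endpoint used elsewhere in this documentation:
```typescript
// Sketch: wake word detection -> transcription -> command processing.
import { SpeechService } from '@ha-mcp/client';

const speech = new SpeechService();

speech.on('wakeWord', (word: string) => {
    console.log(`Wake word detected: ${word}, recording...`);
});

speech.on('transcribed', async (text: string) => {
    // The resulting command is processed by the server
    await executeCommand(text); // assumption: your own command dispatcher
});

async function executeCommand(command: string): Promise<void> {
    await fetch('http://localhost:3000/api/command', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ command }),
    });
}
```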
### Speech-to-Text
#### Automatic Transcription
After wake word detection:
1. Audio is automatically captured (default: 5 seconds)
2. The audio is transcribed using the configured whisper model
3. The transcribed text is processed as a command
#### Manual Transcription
You can also manually transcribe audio using the API:
```typescript
// Using the TypeScript client
import { SpeechService } from '@ha-mcp/client';
const speech = new SpeechService();
// Transcribe from audio buffer
const buffer = await getAudioBuffer();
const text = await speech.transcribe(buffer);
// Transcribe from file
const fileText = await speech.transcribeFile('command.wav');
```
```http
// Using the REST API
POST /api/speech/transcribe
Content-Type: multipart/form-data
file: <audio file>
```
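Calling the multipart endpoint above from TypeScript might look like the sketch below; the response shape is assumed to contain the transcribed text:
```typescript
// Sketch: uploading an audio file to the transcription endpoint shown above.
async function transcribeViaRest(file: Blob): Promise<string> {
    const form = new FormData();
    form.append('file', file, 'command.wav');

    const response = await fetch('http://localhost:3000/api/speech/transcribe', {
        method: 'POST',
        body: form, // multipart/form-data boundary is set automatically
    });
    if (!response.ok) {
        throw new Error(`Transcription failed: ${response.status}`);
    }
    const result = await response.json();
    return result.text; // assumption: endpoint returns { text: "..." }
}
```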
### Event Handling
The system emits various events during speech processing:
```typescript
speech.on('wakeWord', (word: string) => {
console.log(`Wake word detected: ${word}`);
});
speech.on('listening', () => {
console.log('Listening for command...');
});
speech.on('transcribing', () => {
console.log('Processing speech...');
});
speech.on('transcribed', (text: string) => {
console.log(`Transcribed text: ${text}`);
});
speech.on('error', (error: Error) => {
console.error('Speech processing error:', error);
});
```
## Performance Optimization
### Model Selection
Choose an appropriate model based on your needs:
1. Resource-constrained environments:
- Use `tiny.en` or `base.en`
- Run on CPU if GPU unavailable
- Limit concurrent processing
2. High-accuracy requirements:
- Use `small.en` or `medium.en`
- Enable GPU acceleration
- Increase audio quality
3. Production environments:
- Use `base.en` or `small.en`
- Enable GPU acceleration
- Configure appropriate timeouts
### GPU Acceleration
When using GPU acceleration:
1. Monitor GPU memory usage:
```bash
nvidia-smi -l 1
```
2. Adjust model size if needed:
```bash
WHISPER_MODEL_TYPE=small # Decrease if GPU memory limited
```
3. Configure processing device:
```bash
WHISPER_DEVICE=cuda # Use GPU
WHISPER_DEVICE=cpu # Use CPU if GPU unavailable
```
## Troubleshooting
### Common Issues
1. Wake word detection not working:
- Check microphone permissions
- Adjust `WAKE_WORD_SENSITIVITY`
- Verify wake words configuration
2. Poor transcription quality:
- Check audio input quality
- Try a larger model
- Verify language settings
3. Performance issues:
- Monitor resource usage
- Consider smaller model
- Check GPU acceleration status
### Logging
Enable debug logging for detailed information:
```bash
LOG_LEVEL=debug
```
Speech-specific logs will be tagged with `[SPEECH]` prefix.
## Security Considerations
1. Audio Privacy:
- Audio is processed locally
- No data sent to external services
- Temporary files automatically cleaned
2. Access Control:
- Speech endpoints require authentication
- Rate limiting applies to transcription
- Configurable command restrictions
3. Resource Protection:
- Timeouts prevent hanging
- Memory limits enforced
- Graceful error handling

View File

@@ -1,5 +0,0 @@
# Configuration
## Basic Configuration
## Advanced Settings

View File

@@ -1,255 +0,0 @@
---
layout: default
title: Docker Deployment
parent: Getting Started
nav_order: 3
---
# Docker Setup Guide 🐳
## Overview
I've designed the MCP server to run efficiently in Docker containers, with support for different configurations including speech processing and GPU acceleration.
## Build Options 🛠️
### 1. Standard Build
```bash
./docker-build.sh
```
This build includes:
- Core MCP server functionality
- REST API endpoints
- WebSocket/SSE support
- Basic automation features
Resource usage:
- Memory: 50% of available RAM
- CPU: 50% per core
- Disk: ~200MB
### 2. Speech-Enabled Build
```bash
./docker-build.sh --speech
```
Additional features:
- Wake word detection
- Speech-to-text processing
- Multiple language support
Required images:
```bash
onerahmet/openai-whisper-asr-webservice:latest # Speech-to-text
rhasspy/wyoming-openwakeword:latest # Wake word detection
```
Resource requirements:
- Memory: 2GB minimum
- CPU: 2 cores minimum
- Disk: ~2GB
### 3. GPU-Accelerated Build
```bash
./docker-build.sh --speech --gpu
```
Enhanced features:
- CUDA GPU acceleration
- Float16 compute type
- Optimized performance
- Faster speech processing
Requirements:
- NVIDIA GPU
- CUDA drivers
- nvidia-docker runtime
## Docker Compose Files 📄
### 1. Base Configuration (`docker-compose.yml`)
```yaml
version: '3.8'
services:
homeassistant-mcp:
build: .
ports:
- "${HOST_PORT:-4000}:4000"
env_file:
- .env
- .env.${NODE_ENV:-development}
environment:
- NODE_ENV=${NODE_ENV:-development}
- PORT=4000
- HASS_HOST
- HASS_TOKEN
- LOG_LEVEL=${LOG_LEVEL:-info}
volumes:
- .:/app
- /app/node_modules
- logs:/app/logs
```
### 2. Speech Support (`docker-compose.speech.yml`)
```yaml
services:
homeassistant-mcp:
environment:
- ENABLE_SPEECH_FEATURES=true
- ENABLE_WAKE_WORD=true
- ENABLE_SPEECH_TO_TEXT=true
fast-whisper:
image: onerahmet/openai-whisper-asr-webservice:latest
volumes:
- whisper-models:/models
- audio-data:/audio
wake-word:
image: rhasspy/wyoming-openwakeword:latest
devices:
- /dev/snd:/dev/snd
```
## Launch Commands 🚀
### Standard Launch
```bash
# Build and start
./docker-build.sh
docker compose up -d
# View logs
docker compose logs -f
# Stop services
docker compose down
```
### With Speech Features
```bash
# Build with speech support
./docker-build.sh --speech
# Start all services
docker compose -f docker-compose.yml -f docker-compose.speech.yml up -d
# View specific service logs
docker compose logs -f fast-whisper
docker compose logs -f wake-word
```
### With GPU Support
```bash
# Build with GPU acceleration
./docker-build.sh --speech --gpu
# Start with GPU support
docker compose -f docker-compose.yml -f docker-compose.speech.yml \
--env-file .env.gpu up -d
```
## Resource Management 📊
The build script automatically manages resources:
1. **Memory Allocation**
```bash
TOTAL_MEM=$(free -m | awk '/^Mem:/{print $2}')
BUILD_MEM=$(( TOTAL_MEM / 2 ))
```
2. **CPU Management**
```bash
CPU_COUNT=$(nproc)
CPU_QUOTA=$(( CPU_COUNT * 50000 ))
```
3. **Build Arguments**
```bash
BUILD_ARGS=(
--memory="${BUILD_MEM}m"
--memory-swap="${BUILD_MEM}m"
--cpu-quota="${CPU_QUOTA}"
)
```
## Troubleshooting 🔧
### Common Issues
1. **Build Failures**
- Check system resources
- Verify Docker daemon is running
- Ensure network connectivity
- Review build logs
2. **Speech Processing Issues**
- Verify audio device permissions
- Check CUDA installation (for GPU)
- Monitor resource usage
- Review service logs
3. **Performance Problems**
- Adjust resource limits
- Consider GPU acceleration
- Monitor container stats
- Check for resource conflicts
### Debug Commands
```bash
# Check container status
docker compose ps
# View resource usage
docker stats
# Check logs
docker compose logs --tail=100
# Inspect configuration
docker compose config
```
## Best Practices 🎯
1. **Resource Management**
- Monitor container resources
- Set appropriate limits
- Use GPU when available
- Regular cleanup
2. **Security**
- Use non-root users
- Limit container capabilities
- Regular security updates
- Proper secret management
3. **Maintenance**
- Regular image updates
- Log rotation
- Resource cleanup
- Performance monitoring
## Advanced Configuration ⚙️
### Custom Build Arguments
```bash
# Example: Custom memory limits
BUILD_MEM=4096 ./docker-build.sh --speech
# Example: Specific CUDA device
CUDA_VISIBLE_DEVICES=1 ./docker-build.sh --speech --gpu
```
### Environment Overrides
```bash
# Production settings
NODE_ENV=production ./docker-build.sh
# Custom port
HOST_PORT=5000 docker compose up -d
```
See [Configuration Guide](../configuration.md) for more environment options.

View File

@@ -1,8 +0,0 @@
# Getting Started
Welcome to the Advanced Home Assistant MCP getting started guide. Follow these steps to begin:
1. [Installation](installation.md)
2. [Configuration](configuration.md)
3. [Docker Setup](docker.md)
4. [Quick Start](quickstart.md)

View File

@@ -1,181 +0,0 @@
---
layout: default
title: Installation
parent: Getting Started
nav_order: 1
---
# Installation Guide 🛠️
This guide covers different methods to install and set up the MCP Server for Home Assistant. Choose the installation method that best suits your needs.
## Prerequisites
Before installing MCP Server, ensure you have:
- Home Assistant instance running and accessible
- Node.js 18+ or Docker installed
- Home Assistant Long-Lived Access Token ([How to get one](https://developers.home-assistant.io/docs/auth_api/#long-lived-access-token))
## Installation Methods
### 1. 🔧 Smithery Installation (Recommended)
The easiest way to install MCP Server is through Smithery:
#### Smithery Configuration
The project includes a `smithery.yaml` configuration:
```yaml
# Add smithery.yaml contents and explanation
```
#### Installation Steps
```bash
npx -y @smithery/cli install @jango-blockchained/advanced-homeassistant-mcp --client claude
```
### 2. 🐳 Docker Installation
For a containerized deployment:
```bash
# Clone the repository
git clone --depth 1 https://github.com/jango-blockchained/advanced-homeassistant-mcp.git
cd advanced-homeassistant-mcp
# Configure environment variables
cp .env.example .env
# Edit .env with your Home Assistant details:
# - HASS_HOST: Your Home Assistant URL
# - HASS_TOKEN: Your Long-Lived Access Token
# - Other configuration options
# Build and start containers
docker compose up -d --build
# View logs (optional)
docker compose logs -f --tail=50
```
### 3. 💻 Manual Installation
For direct installation on your system:
```bash
# Install Bun runtime
curl -fsSL https://bun.sh/install | bash
# Clone and install
git clone https://github.com/jango-blockchained/advanced-homeassistant-mcp.git
cd advanced-homeassistant-mcp
bun install --frozen-lockfile
# Configure environment
cp .env.example .env
# Edit .env with your configuration
# Start the server
bun run dev --watch
```
## Configuration
### Environment Variables
Key configuration options in your `.env` file:
```env
# Home Assistant Configuration
HASS_HOST=http://your-homeassistant:8123
HASS_TOKEN=your_long_lived_access_token
# Server Configuration
PORT=3000
HOST=0.0.0.0
NODE_ENV=production
# Security Settings
JWT_SECRET=your_secure_jwt_secret
RATE_LIMIT=100
```
### Client Integration
#### Cursor Integration
Add to `.cursor/config/config.json`:
```json
{
"mcpServers": {
"homeassistant-mcp": {
"command": "bun",
"args": ["run", "start"],
"cwd": "${workspaceRoot}",
"env": {
"NODE_ENV": "development"
}
}
}
}
```
#### Claude Desktop Integration
Add to your Claude configuration:
```json
{
"mcpServers": {
"homeassistant-mcp": {
"command": "bun",
"args": ["run", "start", "--port", "8080"],
"env": {
"NODE_ENV": "production"
}
}
}
}
```
## Verification
To verify your installation:
1. Check server status:
```bash
curl http://localhost:3000/health
```
2. Test Home Assistant connection:
```bash
curl http://localhost:3000/api/state
```
## Troubleshooting
If you encounter issues:
1. Check the [Troubleshooting Guide](../troubleshooting.md)
2. Verify your environment variables
3. Check server logs:
```bash
# For Docker installation
docker compose logs -f
# For manual installation
bun run dev
```
## Next Steps
- Follow the [Quick Start Guide](quickstart.md) to begin using MCP Server
- Read the [API Documentation](../api/index.md) for integration details
- Check the [Architecture Overview](../architecture.md) to understand the system
## Support
Need help? Check our [Support Resources](../index.md#support) or [open an issue](https://github.com/jango-blockchained/advanced-homeassistant-mcp/issues).

View File

@@ -1,219 +0,0 @@
---
layout: default
title: Quick Start
parent: Getting Started
nav_order: 2
---
# Quick Start Guide 🚀
This guide will help you get started with MCP Server after installation. We'll cover basic usage, common commands, and simple integrations.
## First Steps
### 1. Verify Connection
After installation, verify your MCP Server is running and connected to Home Assistant:
```bash
# Check server health
curl http://localhost:3000/health
# Verify Home Assistant connection
curl http://localhost:3000/api/state
```
### 2. Basic Voice Commands
Try these basic voice commands to test your setup:
```bash
# Example using curl for testing
curl -X POST http://localhost:3000/api/command \
-H "Content-Type: application/json" \
-d '{"command": "Turn on the living room lights"}'
```
Common voice commands:
- "Turn on/off [device name]"
- "Set [device] to [value]"
- "What's the temperature in [room]?"
- "Is [device] on or off?"
## Real-World Examples
### 1. Smart Lighting Control
```javascript
// Browser example using fetch
const response = await fetch('http://localhost:3000/api/command', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
command: 'Set living room lights to 50% brightness and warm white color'
})
});
```
### 2. Real-Time Updates
Subscribe to device state changes using Server-Sent Events (SSE):
```javascript
const eventSource = new EventSource('http://localhost:3000/subscribe_events?token=YOUR_TOKEN&domain=light');
eventSource.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('Device state changed:', data);
// Update your UI here
};
```
### 3. Scene Automation
Create and trigger scenes for different activities:
```javascript
// Create a "Movie Night" scene
const createScene = async () => {
await fetch('http://localhost:3000/api/scene', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
name: 'Movie Night',
actions: [
{ device: 'living_room_lights', action: 'dim', value: 20 },
{ device: 'tv', action: 'on' },
{ device: 'soundbar', action: 'on' }
]
})
});
};
// Trigger the scene with voice command:
// "Hey MCP, activate movie night scene"
```
## Integration Examples
### 1. Web Dashboard Integration
```javascript
// React component example
function SmartHomeControl() {
const [devices, setDevices] = useState([]);
useEffect(() => {
// Subscribe to device updates
const events = new EventSource('http://localhost:3000/subscribe_events');
events.onmessage = (event) => {
const data = JSON.parse(event.data);
setDevices(currentDevices =>
currentDevices.map(device =>
device.id === data.id ? {...device, ...data} : device
)
);
};
return () => events.close();
}, []);
return (
<div className="dashboard">
{devices.map(device => (
<DeviceCard key={device.id} device={device} />
))}
</div>
);
}
```
### 2. Voice Assistant Integration
```typescript
// Example using speech-to-text with MCP
async function handleVoiceCommand(audioBlob: Blob) {
// First, convert speech to text
const text = await speechToText(audioBlob);
// Then send command to MCP
const response = await fetch('http://localhost:3000/api/command', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ command: text })
});
return response.json();
}
```
## Best Practices
1. **Error Handling**
```javascript
try {
const response = await fetch('http://localhost:3000/api/command', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ command: 'Turn on lights' })
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
} catch (error) {
console.error('Error:', error);
// Handle error appropriately
}
```
2. **Connection Management**
```javascript
class MCPConnection {
constructor() {
this.eventSource = null;
this.reconnectAttempts = 0;
}
connect() {
this.eventSource = new EventSource('http://localhost:3000/subscribe_events');
this.eventSource.onerror = this.handleError.bind(this);
}
handleError() {
if (this.reconnectAttempts < 3) {
setTimeout(() => {
this.reconnectAttempts++;
this.connect();
}, 1000 * this.reconnectAttempts);
}
}
}
```
## Next Steps
- Explore the [API Documentation](../api/index.md) for advanced features
- Learn about [SSE API](../api/sse.md) for real-time updates
- Check out [Architecture](../architecture.md) for system design details
- Read the [Contributing Guide](../contributing.md) to get involved
## Troubleshooting
If you encounter issues:
- Verify your authentication token
- Check server logs for errors
- Ensure Home Assistant is accessible
- Review the [Troubleshooting Guide](../troubleshooting.md)
Need more help? Visit our [Support Resources](../index.md#support).

View File

@@ -1,143 +0,0 @@
---
layout: default
title: Home
nav_order: 1
---
# Home Assistant MCP Documentation 🏠🤖
Welcome to the documentation for my Home Assistant MCP (Model Context Protocol) Server. This documentation will help you get started with installation, configuration, and usage of the MCP server.
## What is MCP? 🤔
MCP is a lightweight integration tool for Home Assistant that provides:
- 🔌 REST API for device control
- 📡 WebSocket/SSE for real-time updates
- 🤖 AI-powered automation analysis
- 🎤 Optional speech processing
- 🔐 Secure authentication
## Quick Links 🔗
- [Quick Start Guide](getting-started/quick-start.md)
- [Configuration Guide](getting-started/configuration.md)
- [API Reference](api/overview.md)
- [Tools & Extras](tools/overview.md)
## System Architecture 📊
```mermaid
flowchart TB
subgraph Client["Client Applications"]
direction TB
Web["Web Interface"]
Mobile["Mobile Apps"]
Voice["Voice Control"]
end
subgraph MCP["MCP Server"]
direction TB
API["REST API"]
WS["WebSocket/SSE"]
Auth["Authentication"]
subgraph Speech["Speech Processing (Optional)"]
direction TB
Wake["Wake Word Detection"]
STT["Speech-to-Text"]
subgraph STT_Options["STT Options"]
direction LR
Whisper["Whisper"]
FastWhisper["Fast Whisper"]
end
Wake --> STT
STT --> STT_Options
end
end
subgraph HA["Home Assistant"]
direction TB
HASS_API["HASS API"]
HASS_WS["HASS WebSocket"]
Devices["Smart Devices"]
end
Client --> MCP
MCP --> HA
HA --> Devices
style Speech fill:#f9f,stroke:#333,stroke-width:2px
style STT_Options fill:#bbf,stroke:#333,stroke-width:1px
```
## Prerequisites 📋
- 🚀 [Bun runtime](https://bun.sh) (v1.0.26+)
- 🏡 [Home Assistant](https://www.home-assistant.io/) instance
- 🐳 Docker (optional, recommended for deployment)
- 🖥️ Node.js 18+ (optional, for speech features)
- 🎮 NVIDIA GPU with CUDA support (optional, for faster speech processing)
## Why Bun? 🚀
I chose Bun as the runtime for several key benefits:
- **Blazing Fast Performance**
- Up to 4x faster than Node.js
- Built-in TypeScript support
- Optimized file system operations
- 🎯 **All-in-One Solution**
- Package manager (faster than npm/yarn)
- Bundler (no webpack needed)
- Test runner (built-in testing)
- TypeScript transpiler
- 🔋 **Built-in Features**
- SQLite3 driver
- .env file loading
- WebSocket client/server
- File watcher
- Test runner
## Getting Started 🚀
Check out the [Quick Start Guide](getting-started/quick-start.md) to begin your journey with Home Assistant MCP!
## Key Features
### 🎮 Device Control
- Basic REST API for device management
- WebSocket and Server-Sent Events (SSE) for real-time updates
- Simple automation rule support
### 🛡️ Security & Performance
- JWT authentication
- Basic request validation
- Lightweight server design
## Documentation Structure
### Getting Started
- [Installation Guide](getting-started/installation.md) - Set up MCP Server
- [Quick Start Tutorial](getting-started/quickstart.md) - Basic usage examples
### Core Documentation
- [API Documentation](api/index.md) - API reference
- [Architecture Overview](architecture.md) - System design
- [Contributing Guidelines](contributing.md) - How to contribute
- [Troubleshooting Guide](troubleshooting.md) - Common issues
## Support
Need help or want to report issues?
- [GitHub Issues](https://github.com/jango-blockchained/homeassistant-mcp/issues)
- [GitHub Discussions](https://github.com/jango-blockchained/homeassistant-mcp/discussions)
## License
This project is licensed under the MIT License. See the [LICENSE](https://github.com/jango-blockchained/homeassistant-mcp/blob/main/LICENSE) file for details.

View File

@@ -1,62 +0,0 @@
// Dark mode handling
document.addEventListener('DOMContentLoaded', function () {
// Check for saved dark mode preference
const darkMode = localStorage.getItem('darkMode');
if (darkMode === 'true') {
document.body.classList.add('dark-mode');
}
});
// Smooth scrolling for anchor links
document.querySelectorAll('a[href^="#"]').forEach(anchor => {
anchor.addEventListener('click', function (e) {
e.preventDefault();
document.querySelector(this.getAttribute('href')).scrollIntoView({
behavior: 'smooth'
});
});
});
// Add copy button to code blocks
document.querySelectorAll('pre code').forEach((block) => {
const button = document.createElement('button');
button.className = 'copy-button';
button.textContent = 'Copy';
button.addEventListener('click', async () => {
await navigator.clipboard.writeText(block.textContent);
button.textContent = 'Copied!';
setTimeout(() => {
button.textContent = 'Copy';
}, 2000);
});
const pre = block.parentNode;
pre.insertBefore(button, block);
});
// Add version selector handling
const versionSelector = document.querySelector('.version-selector');
if (versionSelector) {
versionSelector.addEventListener('change', (e) => {
const version = e.target.value;
window.location.href = `/${version}/`;
});
}
// Add feedback handling
document.querySelectorAll('.feedback-button').forEach(button => {
button.addEventListener('click', function () {
const feedback = this.getAttribute('data-feedback');
// Send feedback to analytics
if (typeof gtag !== 'undefined') {
gtag('event', 'feedback', {
'event_category': 'Documentation',
'event_label': feedback
});
}
// Show thank you message
this.textContent = 'Thank you!';
this.disabled = true;
});
});

View File

@@ -1,12 +0,0 @@
window.MathJax = {
tex: {
inlineMath: [["\\(", "\\)"]],
displayMath: [["\\[", "\\]"]],
processEscapes: true,
processEnvironments: true
},
options: {
ignoreHtmlClass: ".*|",
processHtmlClass: "arithmatex"
}
};

View File

@@ -1,196 +0,0 @@
# Natural Language Processing Guide 🤖
## Overview
My MCP Server includes powerful Natural Language Processing (NLP) capabilities powered by various AI models. This enables intelligent automation analysis, natural language control, and context-aware interactions with your Home Assistant setup.
## Available Models 🎯
### OpenAI Models
- **GPT-4**
- Best for complex automation analysis
- Natural language understanding
- Context window: 8k-32k tokens
- Recommended for: Automation analysis, complex queries
- **GPT-3.5-Turbo**
- Faster response times
- More cost-effective
- Context window: 4k tokens
- Recommended for: Quick commands, basic analysis
### Claude Models
- **Claude 2**
- Excellent code analysis
- Large context window (100k tokens)
- Strong system understanding
- Recommended for: Deep automation analysis
### DeepSeek Models
- **DeepSeek-Coder**
- Specialized in code understanding
- Efficient for automation rules
- Context window: 8k tokens
- Recommended for: Code generation, rule analysis
## Configuration ⚙️
```bash
# AI Model Configuration
PROCESSOR_TYPE=openai # openai, claude, or deepseek
OPENAI_MODEL=gpt-3.5-turbo # or gpt-4, gpt-4-32k
OPENAI_API_KEY=your_key_here
# Optional: DeepSeek Configuration
DEEPSEEK_API_KEY=your_key_here
DEEPSEEK_BASE_URL=https://api.deepseek.com/v1
# Analysis Settings
ANALYSIS_TIMEOUT=30000 # Timeout in milliseconds
MAX_RETRIES=3 # Number of retries on failure
```
## Usage Examples 💡
### 1. Automation Analysis
```bash
# Analyze an automation rule
bun run analyze-automation path/to/automation.yaml
# Example output:
# "This automation triggers on motion detection and turns on lights.
# Potential issues:
# - No timeout for light turn-off
# - Missing condition for ambient light level"
```
### 2. Natural Language Commands
```typescript
// Send a natural language command
const response = await fetch('http://localhost:3000/api/nlp/command', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${token}`
},
body: JSON.stringify({
command: "Turn on the living room lights and set them to warm white"
})
});
```
### 3. Context-Aware Queries
```typescript
// Query with context
const response = await fetch('http://localhost:3000/api/nlp/query', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${token}`
},
body: JSON.stringify({
query: "What's the temperature trend in the bedroom?",
context: {
timeframe: "last_24h",
include_humidity: true
}
})
});
```
## Custom Prompts 📝
You can customize the AI's behavior by creating custom prompts. See [Custom Prompts Guide](prompts.md) for details.
Example custom prompt:
```yaml
name: energy_analysis
description: Analyze home energy usage patterns
prompt: |
Analyze the following energy usage data and provide:
1. Peak usage patterns
2. Potential optimizations
3. Comparison with typical usage
4. Cost-saving recommendations
Context: {context}
Data: {data}
```
## Best Practices 🎯
1. **Model Selection**
- Use GPT-3.5-Turbo for quick queries
- Use GPT-4 for complex analysis
- Use Claude for large context analysis
- Use DeepSeek for code-heavy tasks
2. **Performance Optimization**
- Cache frequent queries
- Use streaming for long responses
- Implement retry logic for API calls (see the sketch after this list)
3. **Cost Management**
- Monitor API usage
- Implement rate limiting
- Cache responses where appropriate
4. **Error Handling**
- Implement fallback models
- Handle API timeouts gracefully
- Log failed queries for analysis
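A minimal retry sketch for the "retry logic" suggestion above, reusing the `MAX_RETRIES` idea from the configuration section; the exponential backoff delays are an illustrative choice:
```typescript
// Sketch: retry an AI API call with exponential backoff.
const MAX_RETRIES = 3; // mirrors the MAX_RETRIES environment setting shown above

async function withRetries<T>(operation: () => Promise<T>): Promise<T> {
    let lastError: unknown;
    for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
        try {
            return await operation();
        } catch (error) {
            lastError = error;
            if (attempt < MAX_RETRIES) {
                // 500ms, 1s, 2s, ... between attempts
                await new Promise(r => setTimeout(r, 500 * 2 ** (attempt - 1)));
            }
        }
    }
    throw lastError;
}

// Usage: const result = await withRetries(() => fetch('/api/nlp/query', { /* ... */ }));
```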
## Advanced Features 🚀
### 1. Chain of Thought Analysis
```typescript
const result = await analyzeWithCoT({
query: "Optimize my morning routine automation",
steps: ["Parse current automation", "Analyze patterns", "Suggest improvements"]
});
```
### 2. Multi-Model Analysis
```typescript
const results = await analyzeWithMultiModel({
query: "Security system optimization",
models: ["gpt-4", "claude-2"],
compareResults: true
});
```
### 3. Contextual Memory
```typescript
const memory = new ContextualMemory({
timeframe: "24h",
maxItems: 100
});
await memory.add("User typically arrives home at 17:30");
```
## Troubleshooting 🔧
### Common Issues
1. **Slow Response Times**
- Check model selection
- Verify API rate limits
- Consider caching
2. **Poor Analysis Quality**
- Review prompt design
- Check context window limits
- Consider using a more capable model
3. **API Errors**
- Verify API keys
- Check network connectivity
- Review rate limits
## API Reference 📚
See [API Documentation](api.md) for detailed endpoint specifications.

View File

@@ -1,263 +0,0 @@
# Custom Prompts Guide 🎯
## Overview
Custom prompts allow you to tailor the AI's behavior to your specific needs. I've designed this system to be flexible and powerful, enabling everything from simple commands to complex automation analysis.
## Prompt Structure 📝
Custom prompts are defined in YAML format:
```yaml
name: prompt_name
description: Brief description of what this prompt does
version: 1.0
author: your_name
tags: [automation, analysis, security]
models: [gpt-4, claude-2] # Compatible models
prompt: |
Your detailed prompt text here.
You can use {variables} for dynamic content.
Context: {context}
Data: {data}
variables:
- name: context
type: object
description: Contextual information
required: true
- name: data
type: array
description: Data to analyze
required: true
```
## Prompt Types 🎨
### 1. Analysis Prompts
```yaml
name: automation_analysis
description: Analyze Home Assistant automations
prompt: |
Analyze the following Home Assistant automation:
{automation_yaml}
Provide:
1. Security implications
2. Performance considerations
3. Potential improvements
4. Error handling suggestions
```
### 2. Command Prompts
```yaml
name: natural_command
description: Process natural language commands
prompt: |
Convert the following natural language command into Home Assistant actions:
"{command}"
Available devices: {devices}
Current state: {state}
```
### 3. Query Prompts
```yaml
name: state_query
description: Answer questions about system state
prompt: |
Answer the following question about the system state:
"{question}"
Current states:
{states}
Historical data:
{history}
```
## Variables and Context 🔄
### Built-in Variables
- `{timestamp}` - Current time
- `{user}` - Current user
- `{device_states}` - All device states
- `{last_events}` - Recent events
- `{system_info}` - System information
### Custom Variables
```yaml
variables:
- name: temperature_threshold
type: number
default: 25
description: Temperature threshold for alerts
- name: devices
type: array
required: true
description: List of relevant devices
```
## Creating Custom Prompts 🛠️
1. Create a new file in `prompts/custom/`:
```bash
bun run create-prompt my_prompt
```
2. Edit the generated template:
```yaml
name: my_custom_prompt
description: My custom prompt for specific tasks
version: 1.0
author: your_name
prompt: |
Your prompt text here
```
3. Test your prompt:
```bash
bun run test-prompt my_custom_prompt
```
## Advanced Features 🚀
### 1. Prompt Chaining
```yaml
name: complex_analysis
chain:
- automation_analysis
- security_check
- optimization_suggestions
```
### 2. Conditional Prompts
```yaml
name: adaptive_response
conditions:
- if: "temperature > 25"
use: high_temp_prompt
- if: "temperature < 10"
use: low_temp_prompt
- else: normal_temp_prompt
```
### 3. Dynamic Templates
```yaml
name: dynamic_template
template: |
{% if time.hour < 12 %}
Good morning! Here's the morning analysis:
{% else %}
Good evening! Here's the evening analysis:
{% endif %}
{analysis_content}
```
## Best Practices 🎯
1. **Prompt Design**
- Be specific and clear
- Include examples
- Use consistent formatting
- Consider edge cases
2. **Variable Usage**
- Define clear variable types
- Provide defaults when possible
- Document requirements
- Validate inputs
3. **Performance**
- Keep prompts concise
- Use appropriate models
- Cache when possible
- Consider token limits
4. **Maintenance**
- Version your prompts
- Document changes
- Test thoroughly
- Share improvements
## Examples 📚
### Home Security Analysis
```yaml
name: security_analysis
description: Analyze home security status
prompt: |
Analyze the current security status:
Doors: {door_states}
Windows: {window_states}
Cameras: {camera_states}
Motion Sensors: {motion_states}
Recent Events:
{recent_events}
Provide:
1. Current security status
2. Potential vulnerabilities
3. Recommended actions
4. Automation suggestions
```
### Energy Optimization
```yaml
name: energy_optimization
description: Analyze and optimize energy usage
prompt: |
Review energy consumption patterns:
Usage Data: {energy_data}
Device States: {device_states}
Weather: {weather_data}
Provide:
1. Usage patterns
2. Inefficiencies
3. Optimization suggestions
4. Estimated savings
```
## Troubleshooting 🔧
### Common Issues
1. **Prompt Not Working**
- Verify YAML syntax
- Check variable definitions
- Validate model compatibility
- Review token limits
2. **Poor Results**
- Improve prompt specificity
- Add more context
- Try different models
- Include examples
3. **Performance Issues**
- Optimize prompt length
- Review caching strategy
- Check rate limits
- Monitor token usage
## API Integration 🔌
```typescript
// Load a custom prompt
const prompt = await loadPrompt('my_custom_prompt');
// Execute with variables
const result = await executePrompt(prompt, {
context: currentContext,
data: analysisData
});
```
See [API Documentation](api.md) for more details.

View File

@@ -1,42 +0,0 @@
# Core
mkdocs>=1.5.3
mkdocs-material>=9.5.3
# Enhanced Functionality
mkdocs-minify-plugin>=0.7.1
mkdocs-git-revision-date-localized-plugin>=1.2.1
mkdocs-glightbox>=0.3.4
mkdocs-git-authors-plugin>=0.7.2
mkdocs-git-committers-plugin>=0.2.3
mkdocs-static-i18n>=1.2.0
mkdocs-awesome-pages-plugin>=2.9.2
mkdocs-redirects>=1.2.1
mkdocs-include-markdown-plugin>=6.0.4
mkdocs-macros-plugin>=1.0.4
mkdocs-meta-descriptions-plugin>=3.0.0
mkdocs-print-site-plugin>=2.3.6
# Code Documentation
mkdocstrings>=0.24.0
mkdocstrings-python>=1.7.5
# Markdown Extensions
pymdown-extensions>=10.5
markdown>=3.5.1
mdx_truly_sane_lists>=1.3
pygments>=2.17.2
# Math Support
python-markdown-math>=0.8
# Diagrams
plantuml-markdown>=3.9.2
mkdocs-mermaid2-plugin>=1.1.1
# Search Enhancements
mkdocs-material[imaging]>=9.5.3
pillow>=10.2.0
cairosvg>=2.7.1
# Development Tools
mike>=2.0.0 # For version management

View File

@@ -1,52 +0,0 @@
# Roadmap for MCP Server
The following roadmap outlines our planned enhancements and future directions for the Home Assistant MCP Server. This document is a living guide that will be updated as new features are developed.
## Near-Term Goals
- **Core Functionality Improvements:**
- Enhance REST API capabilities
- Improve WebSocket and SSE reliability
- Develop more robust error handling
- **Security Enhancements:**
- Strengthen JWT authentication
- Improve input validation
- Add basic logging for security events
- **Performance Optimizations:**
- Optimize server response times
- Improve resource utilization
- Implement basic caching mechanisms
## Mid-Term Goals
- **Device Integration:**
- Expand support for additional Home Assistant device types
- Improve device state synchronization
- Develop more flexible automation rule support
- **Developer Experience:**
- Improve documentation
- Create more comprehensive examples
- Develop basic CLI tools for configuration
## Long-Term Vision
- **Extensibility:**
- Design a simple plugin system
- Create guidelines for community contributions
- Establish a clear extension mechanism
- **Reliability:**
- Implement comprehensive testing
- Develop monitoring and basic health check features
- Improve overall system stability
## How to Follow the Roadmap
- **Community Involvement:** We welcome feedback and contributions.
- **Transparency:** Check our GitHub repository for ongoing discussions.
- **Iterative Development:** Goals may change based on community needs and technical feasibility.
*This roadmap is intended as a guide and may evolve based on community needs, technological advancements, and strategic priorities.*

View File

@@ -1,146 +0,0 @@
# Security Guide
This document outlines security best practices and configurations for the Home Assistant MCP Server.
## Authentication
### JWT Authentication
The server uses JWT (JSON Web Tokens) for API authentication:
```http
Authorization: Bearer YOUR_JWT_TOKEN
```
### Token Configuration
```yaml
security:
jwt_secret: YOUR_SECRET_KEY
token_expiry: 24h
refresh_token_expiry: 7d
```
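For illustration, a minimal sketch of issuing and verifying a token that matches the 24h expiry above, assuming the `jsonwebtoken` library and a `JWT_SECRET` environment variable (both are assumptions, not required choices):
```typescript
import jwt from "jsonwebtoken";
// Issue a token with the configured 24h lifetime
const token = jwt.sign(
  { userId: "user_123", role: "user" },
  process.env.JWT_SECRET!,
  { expiresIn: "24h" }
);
// Verify the token on incoming requests; throws if invalid or expired
try {
  const payload = jwt.verify(token, process.env.JWT_SECRET!);
  console.log("Authenticated payload:", payload);
} catch (err) {
  console.error("Token rejected:", err);
}
```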
## Access Control
### CORS Configuration
Configure allowed origins to prevent unauthorized access:
```yaml
security:
allowed_origins:
- http://localhost:3000
- https://your-domain.com
```
### IP Filtering
Restrict access by IP address:
```yaml
security:
allowed_ips:
- 192.168.1.0/24
- 10.0.0.0/8
```
## SSL/TLS Configuration
### Enable HTTPS
```yaml
ssl:
enabled: true
cert_file: /path/to/cert.pem
key_file: /path/to/key.pem
```
### Certificate Management
1. Use Let's Encrypt for free SSL certificates
2. Regularly renew certificates
3. Monitor certificate expiration
## Rate Limiting
### Basic Rate Limiting
```yaml
rate_limit:
enabled: true
requests_per_minute: 100
burst: 20
```
### Advanced Rate Limiting
```yaml
rate_limit:
rules:
- endpoint: /api/control
requests_per_minute: 50
- endpoint: /api/state
requests_per_minute: 200
```
## Data Protection
### Sensitive Data
- Use environment variables for secrets
- Encrypt sensitive data at rest
- Implement secure backup procedures
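As a hedged sketch of the first point, secrets can be read from the environment once at startup and validated before the server boots; the variable names below are illustrative, not required settings:
```typescript
// Read secrets from the environment instead of hard-coding them
const requiredSecrets = ["JWT_SECRET", "HA_TOKEN"] as const;
for (const name of requiredSecrets) {
  if (!process.env[name]) {
    // Fail fast so a misconfigured deployment never starts without its secrets
    throw new Error(`Missing required environment variable: ${name}`);
  }
}
const jwtSecret = process.env.JWT_SECRET!;
```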
### Logging Security
- Avoid logging sensitive information
- Rotate logs regularly
- Protect log file access
## Best Practices
1. Regular Security Updates
- Keep dependencies updated
- Monitor security advisories
- Apply patches promptly
2. Password Policies
- Enforce strong passwords
- Implement password expiration
- Use secure password storage
3. Monitoring
- Log security events
- Monitor access patterns
- Set up alerts for suspicious activity
4. Network Security
- Use VPN for remote access
- Implement network segmentation
- Configure firewalls properly
## Security Checklist
- [ ] Configure SSL/TLS
- [ ] Set up JWT authentication
- [ ] Configure CORS properly
- [ ] Enable rate limiting
- [ ] Implement IP filtering
- [ ] Secure sensitive data
- [ ] Set up monitoring
- [ ] Configure backup encryption
- [ ] Update security policies
## Incident Response
1. Detection
- Monitor security logs
- Set up intrusion detection
- Configure alerts
2. Response
- Document incident details
- Isolate affected systems
- Investigate root cause
3. Recovery
- Apply security fixes
- Restore from backups
- Update security measures
## Additional Resources
- [Security Best Practices](https://owasp.org/www-project-top-ten/)
- [JWT Security](https://jwt.io/introduction)
- [SSL Configuration](https://ssl-config.mozilla.org/)

View File

@@ -1,164 +0,0 @@
/* Modern Dark Theme Enhancements */
[data-md-color-scheme="slate"] {
--md-default-bg-color: #1a1b26;
--md-default-fg-color: #a9b1d6;
--md-default-fg-color--light: #a9b1d6;
--md-default-fg-color--lighter: #787c99;
--md-default-fg-color--lightest: #4e5173;
--md-primary-fg-color: #7aa2f7;
--md-primary-fg-color--light: #7dcfff;
--md-primary-fg-color--dark: #2ac3de;
--md-accent-fg-color: #bb9af7;
--md-accent-fg-color--transparent: #bb9af722;
--md-accent-bg-color: #1a1b26;
--md-accent-bg-color--light: #24283b;
}
/* Code Blocks */
.highlight pre {
background-color: #24283b !important;
border-radius: 6px;
padding: 1em;
margin: 1em 0;
overflow: auto;
}
.highlight code {
font-family: 'Roboto Mono', monospace;
font-size: 0.9em;
}
/* Copy Button */
.copy-button {
position: absolute;
right: 0.5em;
top: 0.5em;
padding: 0.4em 0.8em;
background-color: var(--md-accent-bg-color--light);
border: 1px solid var(--md-accent-fg-color--transparent);
border-radius: 4px;
color: var(--md-default-fg-color);
font-size: 0.8em;
cursor: pointer;
transition: all 0.2s ease;
}
.copy-button:hover {
background-color: var(--md-accent-fg-color--transparent);
border-color: var(--md-accent-fg-color);
}
/* Navigation Enhancements */
.md-nav {
font-size: 0.9rem;
}
.md-nav__link {
padding: 0.4rem 0;
transition: color 0.2s ease;
}
.md-nav__link:hover {
color: var(--md-primary-fg-color) !important;
}
/* Tabs */
.md-tabs__link {
opacity: 0.8;
transition: opacity 0.2s ease;
}
.md-tabs__link:hover {
opacity: 1;
}
.md-tabs__link--active {
opacity: 1;
}
/* Admonitions */
.md-typeset .admonition,
.md-typeset details {
border-width: 0;
border-left-width: 4px;
border-radius: 4px;
}
/* Tables */
.md-typeset table:not([class]) {
border-radius: 4px;
box-shadow: 0 2px 4px var(--md-accent-fg-color--transparent);
}
.md-typeset table:not([class]) th {
background-color: var(--md-accent-bg-color--light);
border-bottom: 2px solid var(--md-accent-fg-color--transparent);
}
/* Search */
.md-search__form {
background-color: var(--md-accent-bg-color--light);
border-radius: 4px;
}
/* Feedback Buttons */
.feedback-button {
padding: 0.5em 1em;
margin: 0 0.5em;
border-radius: 4px;
background-color: var(--md-accent-bg-color--light);
border: 1px solid var(--md-accent-fg-color--transparent);
color: var(--md-default-fg-color);
cursor: pointer;
transition: all 0.2s ease;
}
.feedback-button:hover {
background-color: var(--md-accent-fg-color--transparent);
border-color: var(--md-accent-fg-color);
}
.feedback-button:disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* Version Selector */
.version-selector {
padding: 0.5em;
border-radius: 4px;
background-color: var(--md-accent-bg-color--light);
border: 1px solid var(--md-accent-fg-color--transparent);
color: var(--md-default-fg-color);
}
/* Scrollbar */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: var(--md-accent-bg-color--light);
}
::-webkit-scrollbar-thumb {
background: var(--md-accent-fg-color--transparent);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--md-accent-fg-color);
}
/* Print Styles */
@media print {
.md-typeset a {
color: var(--md-default-fg-color) !important;
}
.md-content__inner {
margin: 0;
padding: 1rem;
}
}

View File

@@ -1,422 +0,0 @@
# Testing Documentation
## Quick Reference
```bash
# Most Common Commands
bun test # Run all tests
bun test --watch # Run tests in watch mode
bun test --coverage # Run tests with coverage
bun test path/to/test.ts # Run a specific test file
# Additional Options
DEBUG=true bun test # Run with debug output
bun test --pattern "auth" # Run tests matching a pattern
bun test --timeout 60000 # Run with a custom timeout
```
## Overview
This document describes the testing setup and practices used in the Home Assistant MCP project. We use Bun's test runner for both unit and integration testing, ensuring comprehensive coverage across modules.
## Test Structure
Tests are organized in two main locations:
1. **Root Level Integration Tests** (`/__tests__/`):
```
__tests__/
├── ai/ # AI/ML component tests
├── api/ # API integration tests
├── context/ # Context management tests
├── hass/ # Home Assistant integration tests
├── schemas/ # Schema validation tests
├── security/ # Security integration tests
├── tools/ # Tools and utilities tests
├── websocket/ # WebSocket integration tests
├── helpers.test.ts # Helper function tests
├── index.test.ts # Main application tests
└── server.test.ts # Server integration tests
```
2. **Component Level Unit Tests** (`src/**/`):
```
src/
├── __tests__/ # Global test setup and utilities
│ └── setup.ts # Global test configuration
├── component/
│ ├── __tests__/ # Component-specific unit tests
│ └── component.ts
```
## Test Configuration
### Bun Test Configuration (`bunfig.toml`)
```toml
[test]
preload = ["./src/__tests__/setup.ts"] # Global test setup
coverage = true # Enable coverage by default
timeout = 30000 # Test timeout in milliseconds
testMatch = ["**/__tests__/**/*.test.ts"] # Test file patterns
```
### Bun Scripts
Available test commands in `package.json`:
```bash
# Run all tests
bun test
# Watch mode for development
bun test --watch
# Generate coverage report
bun test --coverage
# Run linting
bun run lint
# Format code
bun run format
```
## Test Setup
### Global Configuration
A global test setup file (`src/__tests__/setup.ts`) provides:
- Environment configuration
- Mock utilities
- Test helper functions
- Global lifecycle hooks
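A minimal sketch of what such a setup file can look like with Bun's test runner; the actual contents of `src/__tests__/setup.ts` may differ:
```typescript
import { beforeAll, afterAll } from "bun:test";
// Force test environment settings before any suite runs
process.env.NODE_ENV = "test";
// Silence console noise unless DEBUG=true is set
if (process.env.DEBUG !== "true") {
  console.log = () => {};
  console.debug = () => {};
}
beforeAll(() => {
  // Global setup: seed fixtures, start in-memory services, etc.
});
afterAll(() => {
  // Global teardown: close connections, remove temporary files, etc.
});
```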
### Test Environment
- Environment variables are loaded from `.env.test`.
- Console output is minimized unless `DEBUG=true`.
- JWT secrets and tokens are preconfigured for testing.
- Rate limiting and security features are initialized appropriately.
## Running Tests
```bash
# Basic test run
bun test
# Run tests with coverage
bun test --coverage
# Run a specific test file
bun test path/to/test.test.ts
# Run tests in watch mode
bun test --watch
# Run tests with debug output
DEBUG=true bun test
# Run tests with increased timeout
bun test --timeout 60000
# Run tests matching a pattern
bun test --pattern "auth"
```
## Writing Tests
### Test File Naming
- Place test files in a `__tests__` directory adjacent to the code being tested.
- Name files with the pattern `*.test.ts`.
- Mirror the structure of the source code in your test organization.
### Example Test Structure
```typescript
describe("Security Features", () => {
it("should validate tokens correctly", () => {
const payload = { userId: "123", role: "user" };
const token = jwt.sign(payload, validSecret, { expiresIn: "1h" });
const result = TokenManager.validateToken(token, testIp);
expect(result.valid).toBe(true);
});
});
```
## Coverage
The project maintains strict coverage:
- Overall coverage: at least 80%
- Critical paths: 90%+
- New features: ≥85% coverage
Generate a coverage report with:
```bash
bun test --coverage
```
## Security Middleware Testing
### Utility Function Testing
The security middleware uses a utility-first approach that allows more granular and comprehensive testing: each security function is independently testable, which improves code reliability and maintainability.
#### Key Utility Functions
1. **Rate Limiting (`checkRateLimit`)**
- Tests multiple scenarios:
- Requests under threshold
- Requests exceeding threshold
- Rate limit reset after window expiration
```typescript
// Example test
it('should throw when requests exceed threshold', () => {
const ip = '127.0.0.2';
for (let i = 0; i < 11; i++) {
if (i < 10) {
expect(() => checkRateLimit(ip, 10)).not.toThrow();
} else {
expect(() => checkRateLimit(ip, 10)).toThrow('Too many requests from this IP');
}
}
});
```
2. **Request Validation (`validateRequestHeaders`)**
- Tests content type validation
- Checks request size limits
- Validates authorization headers
```typescript
it('should reject invalid content type', () => {
const mockRequest = new Request('http://localhost', {
method: 'POST',
headers: { 'content-type': 'text/plain' }
});
expect(() => validateRequestHeaders(mockRequest)).toThrow('Content-Type must be application/json');
});
```
3. **Input Sanitization (`sanitizeValue`)**
- Sanitizes HTML tags
- Handles nested objects
- Preserves non-string values
```typescript
it('should sanitize HTML tags', () => {
const input = '<script>alert("xss")</script>Hello';
const sanitized = sanitizeValue(input);
expect(sanitized).toBe('&lt;script&gt;alert(&quot;xss&quot;)&lt;/script&gt;Hello');
});
```
4. **Security Headers (`applySecurityHeaders`)**
- Verifies correct security header application
- Checks CSP, frame options, and other security headers
```typescript
it('should apply security headers', () => {
const mockRequest = new Request('http://localhost');
const headers = applySecurityHeaders(mockRequest);
expect(headers['content-security-policy']).toBeDefined();
expect(headers['x-frame-options']).toBeDefined();
});
```
5. **Error Handling (`handleError`)**
- Tests error responses in production and development modes
- Verifies error message and stack trace inclusion
```typescript
it('should include error details in development mode', () => {
  const error = new Error('Test error');
  const result = handleError(error, 'development');
  // Development mode exposes the original error message and stack;
  // the detail field name depends on handleError's implementation
  expect(result).toEqual({
    error: true,
    message: 'Internal server error',
    details: 'Test error',
    stack: expect.any(String)
  });
});
```
### Testing Philosophy
- **Isolation**: Each utility function is tested independently
- **Comprehensive Coverage**: Multiple scenarios for each function
- **Predictable Behavior**: Clear expectations for input and output
- **Error Handling**: Robust testing of error conditions
### Best Practices
1. Use minimal, focused test cases
2. Test both successful and failure scenarios
3. Verify input sanitization and security measures
4. Mock external dependencies when necessary
### Running Security Tests
```bash
# Run all tests
bun test
# Run specific security tests
bun test __tests__/security/
```
### Continuous Improvement
- Regularly update test cases
- Add new test scenarios as security requirements evolve
- Perform periodic security audits
## Best Practices
1. **Isolation**: Each test should be independent and not rely on the state of other tests.
2. **Mocking**: Use the provided mock utilities for external dependencies.
3. **Cleanup**: Clean up any resources or state modifications in `afterEach` or `afterAll` hooks.
4. **Descriptive Names**: Use clear, descriptive test names that explain the expected behavior.
5. **Assertions**: Make specific, meaningful assertions rather than general ones.
6. **Setup**: Use `beforeEach` for common test setup to avoid repetition.
7. **Error Cases**: Test both success and error cases for complete coverage.
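The isolation, setup, and cleanup practices above look like this in a typical suite; the cache used here is a stand-in, not part of the real test utilities:
```typescript
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
describe("device state cache", () => {
  let cache: Map<string, string>;
  beforeEach(() => {
    // Fresh state for every test keeps them independent
    cache = new Map([["light.bedroom", "off"]]);
  });
  afterEach(() => {
    // Clean up any state the test may have modified
    cache.clear();
  });
  it("updates the cached state for a known entity", () => {
    cache.set("light.bedroom", "on");
    expect(cache.get("light.bedroom")).toBe("on");
  });
  it("reports unknown entities as undefined", () => {
    expect(cache.get("light.kitchen")).toBeUndefined();
  });
});
```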
## Coverage
The project aims for high test coverage, particularly focusing on:
- Security-critical code paths
- API endpoints
- Data validation
- Error handling
- Event broadcasting
Run coverage reports using:
```bash
bun test --coverage
```
## Debugging Tests
To debug tests:
1. Set `DEBUG=true` to enable console output during tests
2. Use the `--watch` flag for development
3. Add `console.log()` statements (they're only shown when DEBUG is true)
4. Use the test utilities' debugging helpers
### Advanced Debugging
1. **Using Node Inspector**:
```bash
# Start tests with inspector
bun test --inspect
# Start tests with inspector and break on first line
bun test --inspect-brk
```
2. **Using VS Code**:
```jsonc
// .vscode/launch.json
{
"version": "0.2.0",
"configurations": [
{
"type": "bun",
"request": "launch",
"name": "Debug Tests",
"program": "${workspaceFolder}/node_modules/bun/bin/bun",
"args": ["test", "${file}"],
"cwd": "${workspaceFolder}",
"env": { "DEBUG": "true" }
}
]
}
```
3. **Test Isolation**:
To run a single test in isolation:
```typescript
describe.only("specific test suite", () => {
it.only("specific test case", () => {
// Only this test will run
});
});
```
## Contributing
When contributing new code:
1. Add tests for new features
2. Ensure existing tests pass
3. Maintain or improve coverage
4. Follow the existing test patterns and naming conventions
5. Document any new test utilities or patterns
## Coverage Requirements
The project maintains strict coverage requirements:
- Minimum overall coverage: 80%
- Critical paths (security, API, data validation): 90%
- New features must include tests with >= 85% coverage
Coverage reports are generated in multiple formats:
- Console summary
- HTML report (./coverage/index.html)
- LCOV report (./coverage/lcov.info)
To view detailed coverage:
```bash
# Generate and open coverage report
bun test --coverage && open coverage/index.html
```

View File

@@ -1,240 +0,0 @@
# Add-on Management Tool
The Add-on Management tool provides functionality to manage Home Assistant add-ons through the MCP interface.
## Features
- List available add-ons
- Install/uninstall add-ons
- Start/stop/restart add-ons
- Get add-on information
- Update add-ons
- Configure add-ons
- View add-on logs
- Monitor add-on status
## Usage
### REST API
```typescript
GET /api/addons
GET /api/addons/{addon_slug}
POST /api/addons/{addon_slug}/install
POST /api/addons/{addon_slug}/uninstall
POST /api/addons/{addon_slug}/start
POST /api/addons/{addon_slug}/stop
POST /api/addons/{addon_slug}/restart
GET /api/addons/{addon_slug}/logs
PUT /api/addons/{addon_slug}/config
GET /api/addons/{addon_slug}/stats
```
### WebSocket
```typescript
// List add-ons
{
"type": "get_addons"
}
// Get add-on info
{
"type": "get_addon_info",
"addon_slug": "required_addon_slug"
}
// Install add-on
{
"type": "install_addon",
"addon_slug": "required_addon_slug",
"version": "optional_version"
}
// Control add-on
{
"type": "control_addon",
"addon_slug": "required_addon_slug",
"action": "start|stop|restart"
}
```
## Examples
### List All Add-ons
```typescript
const response = await fetch('http://your-ha-mcp/api/addons', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const addons = await response.json();
```
### Install Add-on
```typescript
const response = await fetch('http://your-ha-mcp/api/addons/mosquitto/install', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"version": "latest"
})
});
```
### Configure Add-on
```typescript
const response = await fetch('http://your-ha-mcp/api/addons/mosquitto/config', {
method: 'PUT',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"logins": [
{
"username": "mqtt_user",
"password": "mqtt_password"
}
],
"customize": {
"active": true,
"folder": "mosquitto"
}
})
});
```
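### Get Add-on Stats
Resource usage can be read from the stats endpoint listed above; this sketch reuses the illustrative `mosquitto` slug and the fields from the stats response documented below:
```typescript
const response = await fetch('http://your-ha-mcp/api/addons/mosquitto/stats', {
  headers: {
    'Authorization': 'Bearer your_access_token'
  }
});
const { data } = await response.json();
// Fields follow the Add-on Stats Response format below
console.log(`CPU: ${data.stats.cpu_percent}%`);
```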
## Response Format
### Add-on List Response
```json
{
"success": true,
"data": {
"addons": [
{
"slug": "addon_slug",
"name": "Add-on Name",
"version": "1.0.0",
"state": "started",
"repository": "core",
"installed": true,
"update_available": false
}
]
}
}
```
### Add-on Info Response
```json
{
"success": true,
"data": {
"addon": {
"slug": "addon_slug",
"name": "Add-on Name",
"version": "1.0.0",
"description": "Add-on description",
"long_description": "Detailed description",
"repository": "core",
"installed": true,
"state": "started",
"webui": "http://[HOST]:[PORT:80]",
"boot": "auto",
"options": {
// Add-on specific options
},
"schema": {
// Add-on options schema
},
"ports": {
"80/tcp": 8080
},
"ingress": true,
"ingress_port": 8099
}
}
}
```
### Add-on Stats Response
```json
{
"success": true,
"data": {
"stats": {
"cpu_percent": 2.5,
"memory_usage": 128974848,
"memory_limit": 536870912,
"network_rx": 1234,
"network_tx": 5678,
"blk_read": 12345,
"blk_write": 67890
}
}
}
```
## Error Handling
### Common Error Codes
- `404`: Add-on not found
- `401`: Unauthorized
- `400`: Invalid request
- `409`: Add-on operation failed
- `422`: Invalid configuration
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 50 requests per 15 minutes
- Configurable through environment variables:
- `ADDON_RATE_LIMIT`
- `ADDON_RATE_WINDOW`
## Best Practices
1. Always check add-on compatibility
2. Back up configurations before updates
3. Monitor resource usage
4. Use appropriate update strategies
5. Implement proper error handling
6. Test configurations in safe environment
7. Handle rate limiting gracefully
8. Keep add-ons updated
## Add-on Security
- Use secure passwords
- Regularly update add-ons
- Monitor add-on logs
- Restrict network access
- Use SSL/TLS when available
- Follow principle of least privilege
## See Also
- [Package Management](package.md)
- [Device Control](../device-management/control.md)
- [Event Subscription](../events/subscribe-events.md)

View File

@@ -1,236 +0,0 @@
# Package Management Tool
The Package Management tool provides functionality to manage Home Assistant Community Store (HACS) packages through the MCP interface.
## Features
- List available packages
- Install/update/remove packages
- Search packages
- Get package information
- Manage package repositories
- Track package updates
- View package documentation
- Monitor package status
## Usage
### REST API
```typescript
GET /api/packages
GET /api/packages/{package_id}
POST /api/packages/{package_id}/install
POST /api/packages/{package_id}/uninstall
POST /api/packages/{package_id}/update
GET /api/packages/search
GET /api/packages/categories
GET /api/packages/repositories
```
### WebSocket
```typescript
// List packages
{
"type": "get_packages",
"category": "optional_category"
}
// Search packages
{
"type": "search_packages",
"query": "search_query",
"category": "optional_category"
}
// Install package
{
"type": "install_package",
"package_id": "required_package_id",
"version": "optional_version"
}
```
## Package Categories
- Integrations
- Frontend
- Themes
- AppDaemon Apps
- NetDaemon Apps
- Python Scripts
- Plugins
## Examples
### List All Packages
```typescript
const response = await fetch('http://your-ha-mcp/api/packages', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const packages = await response.json();
```
### Search Packages
```typescript
const response = await fetch('http://your-ha-mcp/api/packages/search?q=weather&category=integrations', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const searchResults = await response.json();
```
### Install Package
```typescript
const response = await fetch('http://your-ha-mcp/api/packages/custom-weather-card/install', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"version": "latest"
})
});
```
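### Update Package
To apply an available update, POST to the update endpoint listed above (shown here for the same illustrative package):
```typescript
const response = await fetch('http://your-ha-mcp/api/packages/custom-weather-card/update', {
  method: 'POST',
  headers: {
    'Authorization': 'Bearer your_access_token'
  }
});
```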
## Response Format
### Package List Response
```json
{
"success": true,
"data": {
"packages": [
{
"id": "package_id",
"name": "Package Name",
"category": "integrations",
"description": "Package description",
"version": "1.0.0",
"installed": true,
"update_available": false,
"stars": 150,
"downloads": 10000
}
]
}
}
```
### Package Info Response
```json
{
"success": true,
"data": {
"package": {
"id": "package_id",
"name": "Package Name",
"category": "integrations",
"description": "Package description",
"long_description": "Detailed description",
"version": "1.0.0",
"installed_version": "0.9.0",
"available_version": "1.0.0",
"installed": true,
"update_available": true,
"stars": 150,
"downloads": 10000,
"repository": "https://github.com/author/repo",
"author": {
"name": "Author Name",
"url": "https://github.com/author"
},
"documentation": "https://github.com/author/repo/wiki",
"dependencies": [
"dependency1",
"dependency2"
]
}
}
}
```
### Search Response
```json
{
"success": true,
"data": {
"results": [
{
"id": "package_id",
"name": "Package Name",
"category": "integrations",
"description": "Package description",
"version": "1.0.0",
"score": 0.95
}
],
"total": 42
}
}
```
## Error Handling
### Common Error Codes
- `404`: Package not found
- `401`: Unauthorized
- `400`: Invalid request
- `409`: Package operation failed
- `422`: Invalid configuration
- `424`: Dependency error
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 50 requests per 15 minutes
- Configurable through environment variables:
- `PACKAGE_RATE_LIMIT`
- `PACKAGE_RATE_WINDOW`
## Best Practices
1. Check package compatibility
2. Review package documentation
3. Verify package dependencies
4. Back up before updates
5. Test in safe environment
6. Monitor resource usage
7. Keep packages updated
8. Handle rate limiting gracefully
## Package Security
- Verify package sources
- Review package permissions
- Check package reputation
- Monitor package activity
- Keep dependencies updated
- Follow security advisories
## See Also
- [Add-on Management](addon.md)
- [Device Control](../device-management/control.md)
- [Event Subscription](../events/subscribe-events.md)

View File

@@ -1,321 +0,0 @@
# Automation Configuration Tool
The Automation Configuration tool provides functionality to create, update, and manage Home Assistant automation configurations.
## Features
- Create new automations
- Update existing automations
- Delete automations
- Duplicate automations
- Import/Export automation configurations
- Validate automation configurations
## Usage
### REST API
```typescript
POST /api/automations
PUT /api/automations/{automation_id}
DELETE /api/automations/{automation_id}
POST /api/automations/{automation_id}/duplicate
POST /api/automations/validate
```
### WebSocket
```typescript
// Create automation
{
"type": "create_automation",
"automation": {
// Automation configuration
}
}
// Update automation
{
"type": "update_automation",
"automation_id": "required_automation_id",
"automation": {
// Updated configuration
}
}
// Delete automation
{
"type": "delete_automation",
"automation_id": "required_automation_id"
}
```
## Automation Configuration
### Basic Structure
```json
{
"id": "morning_routine",
"alias": "Morning Routine",
"description": "Turn on lights and adjust temperature in the morning",
"trigger": [
{
"platform": "time",
"at": "07:00:00"
}
],
"condition": [
{
"condition": "time",
"weekday": ["mon", "tue", "wed", "thu", "fri"]
}
],
"action": [
{
"service": "light.turn_on",
"target": {
"entity_id": "light.bedroom"
},
"data": {
"brightness": 255,
"transition": 300
}
}
],
"mode": "single"
}
```
### Trigger Types
```json
// Time-based trigger
{
"platform": "time",
"at": "07:00:00"
}
// State-based trigger
{
"platform": "state",
"entity_id": "binary_sensor.motion",
"to": "on"
}
// Event-based trigger
{
"platform": "event",
"event_type": "custom_event"
}
// Numeric state trigger
{
"platform": "numeric_state",
"entity_id": "sensor.temperature",
"above": 25
}
```
### Condition Types
```json
// Time condition
{
"condition": "time",
"after": "07:00:00",
"before": "22:00:00"
}
// State condition
{
"condition": "state",
"entity_id": "device_tracker.phone",
"state": "home"
}
// Numeric state condition
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 25
}
```
### Action Types
```json
// Service call action
{
"service": "light.turn_on",
"target": {
"entity_id": "light.bedroom"
}
}
// Delay action
{
"delay": "00:00:30"
}
// Scene activation
{
"scene": "scene.evening_mode"
}
// Conditional action
{
"choose": [
{
"conditions": [
{
"condition": "state",
"entity_id": "sun.sun",
"state": "below_horizon"
}
],
"sequence": [
{
"service": "light.turn_on",
"target": {
"entity_id": "light.living_room"
}
}
]
}
]
}
```
## Examples
### Create New Automation
```typescript
const response = await fetch('http://your-ha-mcp/api/automations', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"alias": "Morning Routine",
"description": "Turn on lights in the morning",
"trigger": [
{
"platform": "time",
"at": "07:00:00"
}
],
"action": [
{
"service": "light.turn_on",
"target": {
"entity_id": "light.bedroom"
}
}
]
})
});
```
### Update Existing Automation
```typescript
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine', {
method: 'PUT',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"alias": "Morning Routine",
"trigger": [
{
"platform": "time",
"at": "07:30:00" // Updated time
}
],
"action": [
{
"service": "light.turn_on",
"target": {
"entity_id": "light.bedroom"
}
}
]
})
});
```
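### Validate an Automation
A configuration can be checked against the validation endpoint listed above before it is saved; this sketch assumes the endpoint accepts the same body used for creation:
```typescript
const response = await fetch('http://your-ha-mcp/api/automations/validate', {
  method: 'POST',
  headers: {
    'Authorization': 'Bearer your_access_token',
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    "alias": "Morning Routine",
    "trigger": [
      {
        "platform": "time",
        "at": "07:00:00"
      }
    ],
    "action": [
      {
        "service": "light.turn_on",
        "target": {
          "entity_id": "light.bedroom"
        }
      }
    ]
  })
});
const validation = await response.json();
// Inspect validation.data.valid and validation.data.warnings before creating the automation
```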
## Response Format
### Success Response
```json
{
"success": true,
"data": {
"automation": {
"id": "created_automation_id",
// Full automation configuration
}
}
}
```
### Validation Response
```json
{
"success": true,
"data": {
"valid": true,
"warnings": [
"No conditions specified"
]
}
}
```
## Error Handling
### Common Error Codes
- `404`: Automation not found
- `401`: Unauthorized
- `400`: Invalid configuration
- `409`: Automation creation/update failed
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE",
"validation_errors": [
{
"path": "trigger[0].platform",
"message": "Invalid trigger platform"
}
]
}
```
## Best Practices
1. Always validate configurations before saving
2. Use descriptive aliases and descriptions
3. Group related automations
4. Test automations in a safe environment
5. Document automation dependencies
6. Use variables for reusable values
7. Implement proper error handling
8. Consider automation modes carefully
## See Also
- [Automation Management](automation.md)
- [Event Subscription](../events/subscribe-events.md)
- [Scene Management](../history-state/scene.md)

View File

@@ -1,211 +0,0 @@
# Automation Management Tool
The Automation Management tool provides functionality to manage and control Home Assistant automations.
## Features
- List all automations
- Get automation details
- Toggle automation state (enable/disable)
- Trigger automations manually
- Monitor automation execution
- View automation history
## Usage
### REST API
```typescript
GET /api/automations
GET /api/automations/{automation_id}
POST /api/automations/{automation_id}/toggle
POST /api/automations/{automation_id}/trigger
GET /api/automations/{automation_id}/history
```
### WebSocket
```typescript
// List automations
{
"type": "get_automations"
}
// Toggle automation
{
"type": "toggle_automation",
"automation_id": "required_automation_id"
}
// Trigger automation
{
"type": "trigger_automation",
"automation_id": "required_automation_id",
"variables": {
// Optional variables
}
}
```
## Examples
### List All Automations
```typescript
const response = await fetch('http://your-ha-mcp/api/automations', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const automations = await response.json();
```
### Toggle Automation State
```typescript
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine/toggle', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token'
}
});
```
### Trigger Automation Manually
```typescript
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine/trigger', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"variables": {
"brightness": 100,
"temperature": 22
}
})
});
```
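### Get Automation History
Execution history is available from the history endpoint listed above; the response follows the Automation History Response format documented below:
```typescript
const response = await fetch('http://your-ha-mcp/api/automations/morning_routine/history', {
  headers: {
    'Authorization': 'Bearer your_access_token'
  }
});
const history = await response.json();
```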
## Response Format
### Automation List Response
```json
{
"success": true,
"data": {
"automations": [
{
"id": "automation_id",
"name": "Automation Name",
"enabled": true,
"last_triggered": "2024-02-05T12:00:00Z",
"trigger_count": 42
}
]
}
}
```
### Automation Details Response
```json
{
"success": true,
"data": {
"automation": {
"id": "automation_id",
"name": "Automation Name",
"enabled": true,
"triggers": [
{
"platform": "time",
"at": "07:00:00"
}
],
"conditions": [],
"actions": [
{
"service": "light.turn_on",
"target": {
"entity_id": "light.bedroom"
}
}
],
"mode": "single",
"max": 10,
"last_triggered": "2024-02-05T12:00:00Z",
"trigger_count": 42
}
}
}
```
### Automation History Response
```json
{
"success": true,
"data": {
"history": [
{
"timestamp": "2024-02-05T12:00:00Z",
"trigger": {
"platform": "time",
"at": "07:00:00"
},
"context": {
"user_id": "user_123",
"variables": {}
},
"result": "success"
}
]
}
}
```
## Error Handling
### Common Error Codes
- `404`: Automation not found
- `401`: Unauthorized
- `400`: Invalid request
- `409`: Automation execution failed
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 50 requests per 15 minutes
- Configurable through environment variables:
- `AUTOMATION_RATE_LIMIT`
- `AUTOMATION_RATE_WINDOW`
## Best Practices
1. Monitor automation execution history
2. Use descriptive automation names
3. Implement proper error handling
4. Cache automation configurations when possible
5. Handle rate limiting gracefully
6. Test automations before enabling
7. Use variables for flexible automation behavior
## See Also
- [Automation Configuration](automation-config.md)
- [Event Subscription](../events/subscribe-events.md)
- [Device Control](../device-management/control.md)

View File

@@ -1,195 +0,0 @@
# Device Control Tool
The Device Control tool provides functionality to control various types of devices in your Home Assistant instance.
## Supported Device Types
- Lights
- Switches
- Covers
- Climate devices
- Media players
- And more...
## Usage
### REST API
```typescript
POST /api/devices/{device_id}/control
```
### WebSocket
```typescript
{
"type": "control_device",
"device_id": "required_device_id",
"domain": "required_domain",
"service": "required_service",
"data": {
// Service-specific data
}
}
```
## Domain-Specific Commands
### Lights
```typescript
// Turn on/off
POST /api/devices/light/{device_id}/control
{
"service": "turn_on", // or "turn_off"
}
// Set brightness
{
"service": "turn_on",
"data": {
"brightness": 255 // 0-255
}
}
// Set color
{
"service": "turn_on",
"data": {
"rgb_color": [255, 0, 0] // Red
}
}
```
### Covers
```typescript
// Open/close
POST /api/devices/cover/{device_id}/control
{
"service": "open_cover", // or "close_cover"
}
// Set position
{
"service": "set_cover_position",
"data": {
"position": 50 // 0-100
}
}
```
### Climate
```typescript
// Set temperature
POST /api/devices/climate/{device_id}/control
{
"service": "set_temperature",
"data": {
"temperature": 22.5
}
}
// Set mode
{
"service": "set_hvac_mode",
"data": {
"hvac_mode": "heat" // heat, cool, auto, off
}
}
```
## Examples
### Control Light Brightness
```typescript
const response = await fetch('http://your-ha-mcp/api/devices/light/living_room/control', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"service": "turn_on",
"data": {
"brightness": 128
}
})
});
```
### Control Cover Position
```typescript
const response = await fetch('http://your-ha-mcp/api/devices/cover/bedroom/control', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"service": "set_cover_position",
"data": {
"position": 75
}
})
});
```
## Response Format
### Success Response
```json
{
"success": true,
"data": {
"state": "on",
"attributes": {
// Updated device attributes
}
}
}
```
### Error Response
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Error Handling
### Common Error Codes
- `404`: Device not found
- `401`: Unauthorized
- `400`: Invalid service or parameters
- `409`: Device unavailable or offline
## Rate Limiting
- Default limit: 100 requests per 15 minutes
- Configurable through environment variables:
- `DEVICE_CONTROL_RATE_LIMIT`
- `DEVICE_CONTROL_RATE_WINDOW`
## Best Practices
1. Validate device availability before sending commands
2. Implement proper error handling
3. Use appropriate retry strategies for failed commands
4. Cache device capabilities when possible
5. Handle rate limiting gracefully
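For the last two points, a hedged sketch of a retry wrapper that backs off when the server answers with HTTP 429; the helper name and retry policy are illustrative:
```typescript
async function controlWithRetry(url: string, body: unknown, maxRetries = 3): Promise<Response> {
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    const response = await fetch(url, {
      method: 'POST',
      headers: {
        'Authorization': 'Bearer your_access_token',
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(body)
    });
    // Retry only on rate limiting; back off exponentially between attempts
    if (response.status !== 429 || attempt === maxRetries) {
      return response;
    }
    await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 1000));
  }
  throw new Error('unreachable');
}
const result = await controlWithRetry(
  'http://your-ha-mcp/api/devices/light/living_room/control',
  { service: 'turn_on', data: { brightness: 128 } }
);
```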
## See Also
- [List Devices](list-devices.md)
- [Device History](../history-state/history.md)
- [Event Subscription](../events/subscribe-events.md)

View File

@@ -1,139 +0,0 @@
# List Devices Tool
The List Devices tool provides functionality to retrieve and manage device information from your Home Assistant instance.
## Features
- List all available Home Assistant devices
- Group devices by domain
- Get device states and attributes
- Filter devices by various criteria
## Usage
### REST API
```typescript
GET /api/devices
GET /api/devices/{domain}
GET /api/devices/{device_id}/state
```
### WebSocket
```typescript
// List all devices
{
"type": "list_devices",
"domain": "optional_domain"
}
// Get device state
{
"type": "get_device_state",
"device_id": "required_device_id"
}
```
### Examples
#### List All Devices
```typescript
const response = await fetch('http://your-ha-mcp/api/devices', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const devices = await response.json();
```
#### Get Devices by Domain
```typescript
const response = await fetch('http://your-ha-mcp/api/devices/light', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const lightDevices = await response.json();
```
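#### Get Device State
A single device's state can be read from the state endpoint listed above; the entity-style identifier used here follows the other examples in these docs:
```typescript
const response = await fetch('http://your-ha-mcp/api/devices/light.living_room/state', {
  headers: {
    'Authorization': 'Bearer your_access_token'
  }
});
const state = await response.json();
```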
## Response Format
### Device List Response
```json
{
"success": true,
"data": {
"devices": [
{
"id": "device_id",
"name": "Device Name",
"domain": "light",
"state": "on",
"attributes": {
"brightness": 255,
"color_temp": 370
}
}
]
}
}
```
### Device State Response
```json
{
"success": true,
"data": {
"state": "on",
"attributes": {
"brightness": 255,
"color_temp": 370
},
"last_changed": "2024-02-05T12:00:00Z",
"last_updated": "2024-02-05T12:00:00Z"
}
}
```
## Error Handling
### Common Error Codes
- `404`: Device not found
- `401`: Unauthorized
- `400`: Invalid request parameters
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 100 requests per 15 minutes
- Configurable through environment variables:
- `DEVICE_LIST_RATE_LIMIT`
- `DEVICE_LIST_RATE_WINDOW`
## Best Practices
1. Cache device lists when possible
2. Use domain filtering for better performance
3. Implement proper error handling
4. Handle rate limiting gracefully
## See Also
- [Device Control](control.md)
- [Device History](../history-state/history.md)
- [Event Subscription](../events/subscribe-events.md)

View File

@@ -1,251 +0,0 @@
# SSE Statistics Tool
The SSE Statistics tool provides functionality to monitor and analyze Server-Sent Events (SSE) connections and performance in your Home Assistant MCP instance.
## Features
- Monitor active SSE connections
- Track connection statistics
- Analyze event delivery
- Monitor resource usage
- Connection management
- Performance metrics
- Historical data
- Alert configuration
## Usage
### REST API
```typescript
GET /api/sse/stats
GET /api/sse/connections
GET /api/sse/connections/{connection_id}
GET /api/sse/metrics
GET /api/sse/history
```
### WebSocket
```typescript
// Get SSE stats
{
"type": "get_sse_stats"
}
// Get connection details
{
"type": "get_sse_connection",
"connection_id": "required_connection_id"
}
// Get performance metrics
{
"type": "get_sse_metrics",
"period": "1h|24h|7d|30d"
}
```
## Examples
### Get Current Statistics
```typescript
const response = await fetch('http://your-ha-mcp/api/sse/stats', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const stats = await response.json();
```
### Get Connection Details
```typescript
const response = await fetch('http://your-ha-mcp/api/sse/connections/conn_123', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const connection = await response.json();
```
### Get Performance Metrics
```typescript
const response = await fetch('http://your-ha-mcp/api/sse/metrics?period=24h', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const metrics = await response.json();
```
## Response Format
### Statistics Response
```json
{
"success": true,
"data": {
"active_connections": 42,
"total_events_sent": 12345,
"events_per_second": 5.2,
"memory_usage": 128974848,
"cpu_usage": 2.5,
"uptime": "PT24H",
"event_backlog": 0
}
}
```
### Connection Details Response
```json
{
"success": true,
"data": {
"connection": {
"id": "conn_123",
"client_id": "client_456",
"user_id": "user_789",
"connected_at": "2024-02-05T12:00:00Z",
"last_event_at": "2024-02-05T12:05:00Z",
"events_sent": 150,
"subscriptions": [
{
"event_type": "state_changed",
"entity_id": "light.living_room"
}
],
"state": "active",
"ip_address": "192.168.1.100",
"user_agent": "Mozilla/5.0 ..."
}
}
}
```
### Performance Metrics Response
```json
{
"success": true,
"data": {
"metrics": {
"connections": {
"current": 42,
"max": 100,
"average": 35.5
},
"events": {
"total": 12345,
"rate": {
"current": 5.2,
"max": 15.0,
"average": 4.8
}
},
"latency": {
"p50": 15,
"p95": 45,
"p99": 100
},
"resources": {
"memory": {
"current": 128974848,
"max": 536870912
},
"cpu": {
"current": 2.5,
"max": 10.0,
"average": 3.2
}
}
},
"period": "24h",
"timestamp": "2024-02-05T12:00:00Z"
}
}
```
## Error Handling
### Common Error Codes
- `404`: Connection not found
- `401`: Unauthorized
- `400`: Invalid request parameters
- `503`: Service overloaded
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Monitoring Metrics
### Connection Metrics
- Active connections
- Connection duration
- Connection state
- Client information
- Geographic distribution
- Protocol version
### Event Metrics
- Events per second
- Event types distribution
- Delivery success rate
- Event latency
- Queue size
- Backlog size
### Resource Metrics
- Memory usage
- CPU usage
- Network bandwidth
- Disk I/O
- Connection pool status
- Thread pool status
## Alert Thresholds
- Connection limits
- Event rate limits
- Resource usage limits
- Latency thresholds
- Error rate thresholds
- Backlog thresholds
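As a sketch of how such thresholds might be enforced client-side, the stats endpoint above can be polled and compared against configured limits; the threshold values and polling interval below are assumptions:
```typescript
const MAX_CONNECTIONS = 80;
const MAX_BACKLOG = 100;
async function checkSseHealth(): Promise<void> {
  const response = await fetch('http://your-ha-mcp/api/sse/stats', {
    headers: { 'Authorization': 'Bearer your_access_token' }
  });
  const { data } = await response.json();
  // Fields follow the Statistics Response format above
  if (data.active_connections > MAX_CONNECTIONS) {
    console.warn(`SSE connections high: ${data.active_connections}`);
  }
  if (data.event_backlog > MAX_BACKLOG) {
    console.warn(`SSE backlog growing: ${data.event_backlog}`);
  }
}
// Poll once a minute
setInterval(checkSseHealth, 60_000);
```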
## Best Practices
1. Monitor connection health
2. Track resource usage
3. Set up alerts
4. Analyze usage patterns
5. Optimize performance
6. Plan capacity
7. Implement failover
8. Regular maintenance
## Performance Optimization
- Connection pooling
- Event batching
- Resource throttling
- Load balancing
- Cache optimization
- Connection cleanup
## See Also
- [Event Subscription](subscribe-events.md)
- [Device Control](../device-management/control.md)
- [Automation Management](../automation/automation.md)

View File

@@ -1,253 +0,0 @@
# Event Subscription Tool
The Event Subscription tool provides functionality to subscribe to and monitor real-time events from your Home Assistant instance.
## Features
- Subscribe to Home Assistant events
- Monitor specific entities
- Domain-based monitoring
- Event filtering
- Real-time updates
- Event history
- Custom event handling
- Connection management
## Usage
### REST API
```typescript
POST /api/events/subscribe
DELETE /api/events/unsubscribe
GET /api/events/subscriptions
GET /api/events/history
```
### WebSocket
```typescript
// Subscribe to events
{
"type": "subscribe_events",
"event_type": "optional_event_type",
"entity_id": "optional_entity_id",
"domain": "optional_domain"
}
// Unsubscribe from events
{
"type": "unsubscribe_events",
"subscription_id": "required_subscription_id"
}
```
### Server-Sent Events (SSE)
```typescript
GET /api/events/stream?event_type=state_changed&entity_id=light.living_room
```
## Event Types
- `state_changed`: Entity state changes
- `automation_triggered`: Automation executions
- `scene_activated`: Scene activations
- `device_registered`: New device registrations
- `service_registered`: New service registrations
- `homeassistant_start`: System startup
- `homeassistant_stop`: System shutdown
- Custom events
## Examples
### Subscribe to All State Changes
```typescript
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"event_type": "state_changed"
})
});
```
### Monitor Specific Entity
```typescript
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"event_type": "state_changed",
"entity_id": "light.living_room"
})
});
```
### Domain-Based Monitoring
```typescript
const response = await fetch('http://your-ha-mcp/api/events/subscribe', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"event_type": "state_changed",
"domain": "light"
})
});
```
### SSE Connection Example
```typescript
// Note: the browser-native EventSource API does not accept custom headers.
// The options object below works with an EventSource polyfill that supports
// a headers option; from a plain browser context, authenticate through
// another mechanism the server supports.
const eventSource = new EventSource(
  'http://your-ha-mcp/api/events/stream?event_type=state_changed&entity_id=light.living_room',
  {
    headers: {
      'Authorization': 'Bearer your_access_token'
    }
  }
);
eventSource.onmessage = (event) => {
const data = JSON.parse(event.data);
console.log('Event received:', data);
};
eventSource.onerror = (error) => {
console.error('SSE error:', error);
eventSource.close();
};
```
## Response Format
### Subscription Response
```json
{
"success": true,
"data": {
"subscription_id": "sub_123",
"event_type": "state_changed",
"entity_id": "light.living_room",
"created_at": "2024-02-05T12:00:00Z"
}
}
```
### Event Message Format
```json
{
"event_type": "state_changed",
"entity_id": "light.living_room",
"data": {
"old_state": {
"state": "off",
"attributes": {},
"last_changed": "2024-02-05T11:55:00Z"
},
"new_state": {
"state": "on",
"attributes": {
"brightness": 255
},
"last_changed": "2024-02-05T12:00:00Z"
}
},
"origin": "LOCAL",
"time_fired": "2024-02-05T12:00:00Z",
"context": {
"id": "context_123",
"parent_id": null,
"user_id": "user_123"
}
}
```
### Subscriptions List Response
```json
{
"success": true,
"data": {
"subscriptions": [
{
"id": "sub_123",
"event_type": "state_changed",
"entity_id": "light.living_room",
"created_at": "2024-02-05T12:00:00Z",
"last_event": "2024-02-05T12:05:00Z"
}
]
}
}
```
## Error Handling
### Common Error Codes
- `404`: Event type not found
- `401`: Unauthorized
- `400`: Invalid subscription parameters
- `409`: Subscription already exists
- `429`: Too many subscriptions
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limits:
- Maximum subscriptions: 100 per client
- Maximum event rate: 1000 events per minute
- Configurable through environment variables:
- `EVENT_SUB_MAX_SUBSCRIPTIONS`
- `EVENT_SUB_RATE_LIMIT`
- `EVENT_SUB_RATE_WINDOW`
## Best Practices
1. Use specific event types when possible
2. Implement proper error handling
3. Handle connection interruptions
4. Process events asynchronously
5. Implement backoff strategies
6. Monitor subscription health
7. Clean up unused subscriptions
8. Handle rate limiting gracefully
## Connection Management
- Implement heartbeat monitoring
- Use reconnection strategies
- Handle connection timeouts
- Monitor connection quality
- Implement fallback mechanisms
- Clean up resources properly
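A hedged sketch of a reconnection strategy with exponential backoff for the SSE stream; native EventSource already retries on its own, so this pattern mainly matters when you manage the connection yourself or use a polyfill (authentication is omitted here for brevity):
```typescript
let retryDelay = 1000; // start at 1s, cap at 60s
function connect(): void {
  const source = new EventSource(
    'http://your-ha-mcp/api/events/stream?event_type=state_changed'
  );
  source.onopen = () => {
    retryDelay = 1000; // reset backoff once the stream is healthy
  };
  source.onmessage = (event) => {
    console.log('Event received:', JSON.parse(event.data));
  };
  source.onerror = () => {
    source.close();
    setTimeout(connect, retryDelay);
    retryDelay = Math.min(retryDelay * 2, 60_000);
  };
}
connect();
```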
## See Also
- [SSE Statistics](sse-stats.md)
- [Device Control](../device-management/control.md)
- [Automation Management](../automation/automation.md)

View File

@@ -1,167 +0,0 @@
# Device History Tool
The Device History tool allows you to retrieve historical state information for devices in your Home Assistant instance.
## Features
- Fetch device state history
- Filter by time range
- Get significant changes
- Aggregate data by time periods
- Export historical data
## Usage
### REST API
```typescript
GET /api/history/{device_id}
GET /api/history/{device_id}/period/{start_time}
GET /api/history/{device_id}/period/{start_time}/{end_time}
```
### WebSocket
```typescript
{
"type": "get_history",
"device_id": "required_device_id",
"start_time": "optional_iso_timestamp",
"end_time": "optional_iso_timestamp",
"significant_changes_only": false
}
```
## Query Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `start_time` | ISO timestamp | Start of the period to fetch history for |
| `end_time` | ISO timestamp | End of the period to fetch history for |
| `significant_changes_only` | boolean | Only return significant state changes |
| `minimal_response` | boolean | Return minimal state information |
| `no_attributes` | boolean | Exclude attribute data from response |
## Examples
### Get Recent History
```typescript
const response = await fetch('http://your-ha-mcp/api/history/light.living_room', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const history = await response.json();
```
### Get History for Specific Period
```typescript
const startTime = '2024-02-01T00:00:00Z';
const endTime = '2024-02-02T00:00:00Z';
const response = await fetch(
`http://your-ha-mcp/api/history/light.living_room/period/${startTime}/${endTime}`,
{
headers: {
'Authorization': 'Bearer your_access_token'
}
}
);
const history = await response.json();
```
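### Get Significant Changes Only
The query parameters from the table above can be combined to keep responses small:
```typescript
const response = await fetch(
  'http://your-ha-mcp/api/history/light.living_room?significant_changes_only=true&minimal_response=true',
  {
    headers: {
      'Authorization': 'Bearer your_access_token'
    }
  }
);
const history = await response.json();
```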
## Response Format
### History Response
```json
{
"success": true,
"data": {
"history": [
{
"state": "on",
"attributes": {
"brightness": 255
},
"last_changed": "2024-02-05T12:00:00Z",
"last_updated": "2024-02-05T12:00:00Z"
},
{
"state": "off",
"last_changed": "2024-02-05T13:00:00Z",
"last_updated": "2024-02-05T13:00:00Z"
}
]
}
}
```
### Aggregated History Response
```json
{
"success": true,
"data": {
"aggregates": {
"daily": [
{
"date": "2024-02-05",
"on_time": "PT5H30M",
"off_time": "PT18H30M",
"changes": 10
}
]
}
}
}
```
## Error Handling
### Common Error Codes
- `404`: Device not found
- `401`: Unauthorized
- `400`: Invalid parameters
- `416`: Time range too large
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 50 requests per 15 minutes
- Configurable through environment variables:
- `HISTORY_RATE_LIMIT`
- `HISTORY_RATE_WINDOW`
## Data Retention
- Default retention period: 30 days
- Configurable through environment variables:
- `HISTORY_RETENTION_DAYS`
- Older data may be automatically aggregated
## Best Practices
1. Use appropriate time ranges to avoid large responses
2. Enable `significant_changes_only` for better performance
3. Use `minimal_response` when full state data isn't needed
4. Implement proper error handling
5. Cache frequently accessed historical data
6. Handle rate limiting gracefully
## See Also
- [List Devices](../device-management/list-devices.md)
- [Device Control](../device-management/control.md)
- [Scene Management](scene.md)

View File

@@ -1,215 +0,0 @@
# Scene Management Tool
The Scene Management tool provides functionality to manage and control scenes in your Home Assistant instance.
## Features
- List available scenes
- Activate scenes
- Create new scenes
- Update existing scenes
- Delete scenes
- Get scene state information
## Usage
### REST API
```typescript
GET /api/scenes
GET /api/scenes/{scene_id}
POST /api/scenes/{scene_id}/activate
POST /api/scenes
PUT /api/scenes/{scene_id}
DELETE /api/scenes/{scene_id}
```
### WebSocket
```typescript
// List scenes
{
"type": "get_scenes"
}
// Activate scene
{
"type": "activate_scene",
"scene_id": "required_scene_id"
}
// Create/Update scene
{
"type": "create_scene",
"scene": {
"name": "required_scene_name",
"entities": {
// Entity states
}
}
}
```
## Scene Configuration
### Scene Definition
```json
{
"name": "Movie Night",
"entities": {
"light.living_room": {
"state": "on",
"brightness": 50,
"color_temp": 2700
},
"cover.living_room": {
"state": "closed"
},
"media_player.tv": {
"state": "on",
"source": "HDMI 1"
}
}
}
```
## Examples
### List All Scenes
```typescript
const response = await fetch('http://your-ha-mcp/api/scenes', {
headers: {
'Authorization': 'Bearer your_access_token'
}
});
const scenes = await response.json();
```
### Activate a Scene
```typescript
const response = await fetch('http://your-ha-mcp/api/scenes/movie_night/activate', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token'
}
});
```
### Create a New Scene
```typescript
const response = await fetch('http://your-ha-mcp/api/scenes', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"name": "Movie Night",
"entities": {
"light.living_room": {
"state": "on",
"brightness": 50
},
"cover.living_room": {
"state": "closed"
}
}
})
});
```
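### Delete a Scene
Scenes can also be removed through the DELETE endpoint listed above:
```typescript
const response = await fetch('http://your-ha-mcp/api/scenes/movie_night', {
  method: 'DELETE',
  headers: {
    'Authorization': 'Bearer your_access_token'
  }
});
```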
## Response Format
### Scene List Response
```json
{
"success": true,
"data": {
"scenes": [
{
"id": "scene_id",
"name": "Scene Name",
"entities": {
// Entity configurations
}
}
]
}
}
```
### Scene Activation Response
```json
{
"success": true,
"data": {
"scene_id": "activated_scene_id",
"status": "activated",
"timestamp": "2024-02-05T12:00:00Z"
}
}
```
## Error Handling
### Common Error Codes
- `404`: Scene not found
- `401`: Unauthorized
- `400`: Invalid scene configuration
- `409`: Scene activation failed
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 50 requests per 15 minutes
- Configurable through environment variables:
- `SCENE_RATE_LIMIT`
- `SCENE_RATE_WINDOW`
## Best Practices
1. Validate entity availability before creating scenes
2. Use meaningful scene names
3. Group related entities in scenes
4. Implement proper error handling
5. Cache scene configurations when possible
6. Handle rate limiting gracefully
## Scene Transitions
Scenes can include transition settings for smooth state changes:
```json
{
"name": "Sunset Mode",
"entities": {
"light.living_room": {
"state": "on",
"brightness": 128,
"transition": 5 // 5 seconds
}
}
}
```
## See Also
- [Device Control](../device-management/control.md)
- [Device History](history.md)
- [Automation Management](../automation/automation.md)

View File

@@ -1,42 +0,0 @@
# Tools Overview
The Home Assistant MCP Server provides a variety of tools to help you manage and interact with your home automation system.
## Available Tools
### Device Management
- [List Devices](device-management/list-devices.md) - View and manage connected devices
- [Device Control](device-management/control.md) - Control device states and settings
### History & State
- [History](history-state/history.md) - View and analyze historical data
- [Scene Management](history-state/scene.md) - Create and manage scenes
### Automation
- [Automation Management](automation/automation.md) - Create and manage automations
- [Automation Configuration](automation/automation-config.md) - Configure automation settings
### Add-ons & Packages
- [Add-on Management](addons-packages/addon.md) - Manage server add-ons
- [Package Management](addons-packages/package.md) - Handle package installations
### Notifications
- [Notify](notifications/notify.md) - Send and manage notifications
### Events
- [Event Subscription](events/subscribe-events.md) - Subscribe to system events
- [SSE Statistics](events/sse-stats.md) - Monitor Server-Sent Events statistics
## Getting Started
To get started with these tools:
1. Ensure you have the MCP Server properly installed and configured
2. Check the specific tool documentation for detailed usage instructions
3. Use the API endpoints or command-line interface as needed
## Next Steps
- Review the [API Documentation](../api/index.md) for programmatic access
- Check [Configuration](../config/index.md) for tool-specific settings
- See [Examples](../examples/index.md) for practical use cases

View File

@@ -1,249 +0,0 @@
# Notification Tool
The Notification tool provides functionality to send notifications through various services in your Home Assistant instance.
## Features
- Send notifications
- Support for multiple notification services
- Custom notification data
- Rich media support
- Notification templates
- Delivery tracking
- Priority levels
- Notification groups
## Usage
### REST API
```typescript
POST /api/notify
POST /api/notify/{service_id}
GET /api/notify/services
GET /api/notify/history
```
### WebSocket
```typescript
// Send notification
{
"type": "send_notification",
"service": "required_service_id",
"message": "required_message",
"title": "optional_title",
"data": {
// Service-specific data
}
}
// Get notification services
{
"type": "get_notification_services"
}
```
## Supported Services
- Mobile App
- Email
- SMS
- Telegram
- Discord
- Slack
- Push Notifications
- Custom Services
## Examples
### Basic Notification
```typescript
const response = await fetch('http://your-ha-mcp/api/notify/mobile_app', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"message": "Motion detected in living room",
"title": "Security Alert"
})
});
```
### Rich Notification
```typescript
const response = await fetch('http://your-ha-mcp/api/notify/mobile_app', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"message": "Motion detected in living room",
"title": "Security Alert",
"data": {
"image": "https://your-camera-snapshot.jpg",
"actions": [
{
"action": "view_camera",
"title": "View Camera"
},
{
"action": "dismiss",
"title": "Dismiss"
}
],
"priority": "high",
"ttl": 3600,
"group": "security"
}
})
});
```
### Service-Specific Example (Telegram)
```typescript
const response = await fetch('http://your-ha-mcp/api/notify/telegram', {
method: 'POST',
headers: {
'Authorization': 'Bearer your_access_token',
'Content-Type': 'application/json'
},
body: JSON.stringify({
"message": "Temperature is too high!",
"title": "Climate Alert",
"data": {
"parse_mode": "markdown",
"inline_keyboard": [
[
{
"text": "Turn On AC",
"callback_data": "turn_on_ac"
}
]
]
}
})
});
```
## Response Format
### Success Response
```json
{
"success": true,
"data": {
"notification_id": "notification_123",
"status": "sent",
"timestamp": "2024-02-05T12:00:00Z",
"service": "mobile_app"
}
}
```
### Services List Response
```json
{
"success": true,
"data": {
"services": [
{
"id": "mobile_app",
"name": "Mobile App",
"enabled": true,
"features": [
"actions",
"images",
"sound"
]
}
]
}
}
```
### Notification History Response
```json
{
"success": true,
"data": {
"history": [
{
"id": "notification_123",
"service": "mobile_app",
"message": "Motion detected",
"title": "Security Alert",
"timestamp": "2024-02-05T12:00:00Z",
"status": "delivered"
}
]
}
}
```
## Error Handling
### Common Error Codes
- `404`: Service not found
- `401`: Unauthorized
- `400`: Invalid request
- `408`: Delivery timeout
- `422`: Invalid notification data
### Error Response Format
```json
{
"success": false,
"message": "Error description",
"error_code": "ERROR_CODE"
}
```
## Rate Limiting
- Default limit: 100 notifications per hour
- Configurable through environment variables:
- `NOTIFY_RATE_LIMIT`
- `NOTIFY_RATE_WINDOW`
## Best Practices
1. Use appropriate priority levels
2. Group related notifications
3. Include relevant context
4. Implement proper error handling
5. Use templates for consistency
6. Consider time zones
7. Respect user preferences
8. Handle rate limiting gracefully (see the retry sketch below)
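A hedged sketch of practice 8 — retrying on a 429 response with a simple backoff, reusing the mobile app endpoint from the examples above; the backoff interval is illustrative, and `NOTIFY_RATE_WINDOW` ultimately governs when requests succeed again.
```typescript
// Hedged sketch: retry a notification on HTTP 429 with linear backoff.
// Endpoint and token are the placeholders used throughout this page.
async function sendWithRetry(payload: { message: string; title?: string }, maxAttempts = 3) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const response = await fetch('http://your-ha-mcp/api/notify/mobile_app', {
      method: 'POST',
      headers: {
        'Authorization': 'Bearer your_access_token',
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(payload)
    });
    if (response.status !== 429) {
      return response.json();
    }
    // Wait before retrying; the server-side window is set by NOTIFY_RATE_WINDOW.
    await new Promise(resolve => setTimeout(resolve, attempt * 1000));
  }
  throw new Error('Notification rate limit still exceeded after retries');
}
```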
## Notification Templates
```typescript
// Template example
{
"template": "security_alert",
"data": {
"location": "living_room",
"event_type": "motion",
"timestamp": "2024-02-05T12:00:00Z"
}
}
```
## See Also
- [Event Subscription](../events/subscribe-events.md)
- [Device Control](../device-management/control.md)
- [Automation Management](../automation/automation.md)

View File

@@ -1,374 +0,0 @@
---
layout: default
title: Troubleshooting
nav_order: 6
---
# Troubleshooting Guide 🔧
This guide helps you diagnose and resolve common issues with MCP Server.
## Quick Diagnostics
### Health Check
First, verify the server's health:
```bash
curl http://localhost:3000/health
```
Expected response:
```json
{
"status": "healthy",
"version": "1.0.0",
"uptime": 3600,
"homeAssistant": {
"connected": true,
"version": "2024.1.0"
}
}
```
## Common Issues
### 1. Connection Issues
#### Cannot Connect to MCP Server
**Symptoms:**
- Server not responding
- Connection refused errors
- Timeout errors
**Solutions:**
1. Check if the server is running:
```bash
# For Docker installation
docker compose ps
# For manual installation
ps aux | grep mcp
```
2. Verify port availability:
```bash
# Check if port is in use
netstat -tuln | grep 3000
```
3. Check logs:
```bash
# Docker logs
docker compose logs mcp
# Manual installation logs
bun run dev
```
#### Home Assistant Connection Failed
**Symptoms:**
- "Connection Error" in health check
- Cannot control devices
- State updates not working
**Solutions:**
1. Verify Home Assistant URL and token in `.env`:
```env
HA_URL=http://homeassistant:8123
HA_TOKEN=your_long_lived_access_token
```
2. Test Home Assistant connection:
```bash
curl -H "Authorization: Bearer YOUR_HA_TOKEN" \
http://your-homeassistant:8123/api/
```
3. Check network connectivity:
```bash
# For Docker setup
docker compose exec mcp ping homeassistant
```
### 2. Authentication Issues
#### Invalid Token
**Symptoms:**
- 401 Unauthorized responses
- "Invalid token" errors
**Solutions:**
1. Generate a new token:
```bash
curl -X POST http://localhost:3000/auth/token \
-H "Content-Type: application/json" \
-d '{"username": "your_username", "password": "your_password"}'
```
2. Verify token format:
```javascript
// Token should be in format:
Authorization: Bearer eyJhbGciOiJIUzI1NiIs...
```
#### Rate Limiting
**Symptoms:**
- 429 Too Many Requests
- "Rate limit exceeded" errors
**Solutions:**
1. Check current rate limit status:
```bash
curl -I http://localhost:3000/api/state
```
2. Adjust rate limits in configuration:
```yaml
security:
rateLimit: 100 # Increase if needed
rateLimitWindow: 60000 # Window in milliseconds
```
### 3. Real-time Updates Issues
#### SSE Connection Drops
**Symptoms:**
- Frequent disconnections
- Missing state updates
- EventSource errors
**Solutions:**
1. Implement proper reconnection logic:
```javascript
class SSEClient {
constructor() {
this.connect();
}
connect() {
this.eventSource = new EventSource('/subscribe_events');
this.eventSource.onerror = this.handleError.bind(this);
}
handleError(error) {
console.error('SSE Error:', error);
this.eventSource.close();
setTimeout(() => this.connect(), 1000);
}
}
```
2. Check network stability:
```bash
# Monitor connection stability
ping -c 100 localhost
```
### 4. Performance Issues
#### High Latency
**Symptoms:**
- Slow response times
- Command execution delays
- UI lag
**Solutions:**
1. Enable Redis caching:
```env
REDIS_ENABLED=true
REDIS_URL=redis://localhost:6379
```
2. Monitor system resources:
```bash
# Check CPU and memory usage
docker stats
# Or for manual installation
top -p $(pgrep -f mcp)
```
3. Optimize database queries and caching:
```typescript
// Use batch operations
const results = await Promise.all([
cache.get('key1'),
cache.get('key2')
]);
```
### 5. Device Control Issues
#### Commands Not Executing
**Symptoms:**
- Commands appear successful but no device response
- Inconsistent device states
- Error messages from Home Assistant
**Solutions:**
1. Verify device availability:
```bash
curl http://localhost:3000/api/state/light.living_room
```
2. Check command syntax:
```bash
# Test basic command
curl -X POST http://localhost:3000/api/command \
-H "Content-Type: application/json" \
-d '{"command": "Turn on living room lights"}'
```
3. Review Home Assistant logs:
```bash
docker compose exec homeassistant journalctl -f
```
## Debugging Tools
### Log Analysis
Enable debug logging:
```env
LOG_LEVEL=debug
DEBUG=mcp:*
```
### Network Debugging
Monitor network traffic:
```bash
# TCP dump for API traffic
tcpdump -i any port 3000 -w debug.pcap
```
### Performance Profiling
Enable performance monitoring:
```env
ENABLE_METRICS=true
METRICS_PORT=9090
```
## Getting Help
If you're still experiencing issues:
1. Check the [GitHub Issues](https://github.com/jango-blockchained/advanced-homeassistant-mcp/issues)
2. Search [Discussions](https://github.com/jango-blockchained/advanced-homeassistant-mcp/discussions)
3. Create a new issue with:
- Detailed description
- Logs
- Configuration (sanitized)
- Steps to reproduce
## Maintenance
### Regular Health Checks
Run periodic health checks:
```bash
# Create a cron job
*/5 * * * * curl -f http://localhost:3000/health || notify-admin
```
### Log Rotation
Configure log rotation:
```yaml
logging:
maxSize: "100m"
maxFiles: "7d"
compress: true
```
### Backup Configuration
Regularly backup your configuration:
```bash
# Backup script
tar -czf mcp-backup-$(date +%Y%m%d).tar.gz \
.env \
config/ \
data/
```
## FAQ
### General Questions
#### Q: What is MCP Server?
A: MCP Server is a bridge between Home Assistant and Large Language Models, enabling natural language control and automation of your smart home devices.
#### Q: What are the system requirements?
A: MCP Server requires:
- Node.js 16 or higher
- Home Assistant instance
- 1GB RAM minimum
- 1GB disk space
#### Q: How do I update MCP Server?
A: For Docker installation:
```bash
docker compose pull
docker compose up -d
```
For manual installation:
```bash
git pull
bun install
bun run build
```
### Integration Questions
#### Q: Can I use MCP Server with any Home Assistant instance?
A: Yes, MCP Server works with any Home Assistant instance that has the REST API enabled and a valid long-lived access token.
#### Q: Does MCP Server support all Home Assistant integrations?
A: MCP Server supports all Home Assistant devices and services that are accessible via the REST API.
### Security Questions
#### Q: Is my Home Assistant token secure?
A: Yes, your Home Assistant token is stored securely and only used for authenticated communication between MCP Server and your Home Assistant instance.
#### Q: Can I use MCP Server remotely?
A: Yes, but we recommend using a secure connection (HTTPS) and proper authentication when exposing MCP Server to the internet.
### Troubleshooting Questions
#### Q: Why are my device states not updating?
A: Check:
1. Home Assistant connection
2. WebSocket connection status
3. Device availability in Home Assistant
4. Network connectivity
#### Q: Why are my commands not working?
A: Verify:
1. Command syntax
2. Device availability
3. User permissions
4. Home Assistant API access

View File

@@ -1,96 +0,0 @@
# Usage Guide
This guide explains how to use the Home Assistant MCP Server for basic device management and integration.
## Basic Setup
1. **Starting the Server:**
- Development mode: `bun run dev`
- Production mode: `bun run start`
2. **Accessing the Server:**
- Default URL: `http://localhost:3000`
- Ensure Home Assistant credentials are configured in `.env`
## Device Control
### REST API Interactions
Basic device control can be performed via the REST API:
```typescript
// Turn on a light
fetch('http://localhost:3000/api/control', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${token}`
},
body: JSON.stringify({
entity_id: 'light.living_room',
command: 'turn_on',
parameters: { brightness: 50 }
})
});
```
### Supported Commands
- `turn_on`
- `turn_off`
- `toggle`
- `set_brightness`
### Supported Entities
- Lights
- Switches
- Climate controls
- Media players
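A second hedged sketch along the lines of the example above, this time toggling a switch; the entity ID and the token source are placeholders.
```typescript
// Hedged sketch: toggle a switch via the same /api/control endpoint.
// Entity ID and token source are placeholders; adjust to your setup.
const token = process.env.JWT_TOKEN ?? 'your_jwt_token';
await fetch('http://localhost:3000/api/control', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${token}`
  },
  body: JSON.stringify({
    entity_id: 'switch.coffee_maker',
    command: 'toggle'
  })
});
```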
## Real-Time Updates
### WebSocket Connection
Subscribe to real-time device state changes:
```typescript
const ws = new WebSocket('ws://localhost:3000/events');
ws.onmessage = (event) => {
const deviceUpdate = JSON.parse(event.data);
console.log('Device state changed:', deviceUpdate);
};
```
## Authentication
All API requests require a valid JWT token in the Authorization header.
## Limitations
- Basic device control only
- Limited error handling
- Minimal third-party integrations
## Troubleshooting
1. Verify Home Assistant connection
2. Check JWT token validity
3. Ensure correct entity IDs
4. Review server logs for detailed errors
## Configuration
Configure the server using environment variables in `.env`:
```
HA_URL=http://homeassistant:8123
HA_TOKEN=your_home_assistant_token
JWT_SECRET=your_jwt_secret
```
## Next Steps
- Explore the [API Documentation](api.md)
- Check [Troubleshooting Guide](troubleshooting.md)
- Review [Contributing Guidelines](contributing.md)

View File

@@ -4,8 +4,6 @@ import { DOMParser, Element, Document } from '@xmldom/xmldom';
import dotenv from 'dotenv';
import readline from 'readline';
import chalk from 'chalk';
import express from 'express';
import bodyParser from 'body-parser';
// Load environment variables
dotenv.config();
@@ -118,9 +116,8 @@ interface ModelConfig {
// Update model listing to filter based on API key availability
const AVAILABLE_MODELS: ModelConfig[] = [
// OpenAI models always available
{ name: 'gpt-4o', maxTokens: 4096, contextWindow: 128000 },
{ name: 'gpt-4-turbo', maxTokens: 4096, contextWindow: 128000 },
{ name: 'gpt-4', maxTokens: 8192, contextWindow: 128000 },
{ name: 'gpt-4', maxTokens: 8192, contextWindow: 8192 },
{ name: 'gpt-4-turbo-preview', maxTokens: 4096, contextWindow: 128000 },
{ name: 'gpt-3.5-turbo', maxTokens: 4096, contextWindow: 16385 },
{ name: 'gpt-3.5-turbo-16k', maxTokens: 16385, contextWindow: 16385 },
@@ -151,18 +148,12 @@ const logger = {
// Update default model selection in loadConfig
function loadConfig(): AppConfig {
// Use environment variable or default to gpt-4o
const defaultModelName = process.env.OPENAI_MODEL || 'gpt-4o';
let defaultModel = AVAILABLE_MODELS.find(m => m.name === defaultModelName);
// If the configured model isn't found, use gpt-4o without warning
if (!defaultModel) {
defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4o') || AVAILABLE_MODELS[0];
}
// Always use gpt-4 for now
const defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4') || AVAILABLE_MODELS[0];
return {
mcpServer: process.env.MCP_SERVER || 'http://localhost:3000',
openaiModel: defaultModel.name, // Use the resolved model name
openaiModel: defaultModel.name,
maxRetries: parseInt(process.env.MAX_RETRIES || '3'),
analysisTimeout: parseInt(process.env.ANALYSIS_TIMEOUT || '30000'),
selectedModel: defaultModel
@@ -194,8 +185,8 @@ async function executeMcpTool(toolName: string, parameters: Record<string, any>
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), config.analysisTimeout);
// Update endpoint URL to use the same base path as schema
const endpoint = `${config.mcpServer}/mcp/execute`;
// Update endpoint URL to use the correct API path
const endpoint = `${config.mcpServer}/api/mcp/execute`;
const response = await fetch(endpoint, {
method: "POST",
@@ -258,43 +249,117 @@ function isMcpExecuteResponse(obj: any): obj is McpExecuteResponse {
(obj.success === true || typeof obj.message === 'string');
}
// Add mock data for testing
const MOCK_HA_INFO = {
devices: {
light: [
{ entity_id: 'light.living_room', state: 'on', attributes: { friendly_name: 'Living Room Light', brightness: 255 } },
{ entity_id: 'light.kitchen', state: 'off', attributes: { friendly_name: 'Kitchen Light', brightness: 0 } }
],
switch: [
{ entity_id: 'switch.tv', state: 'off', attributes: { friendly_name: 'TV Power' } }
],
sensor: [
{ entity_id: 'sensor.temperature', state: '21.5', attributes: { friendly_name: 'Living Room Temperature', unit_of_measurement: '°C' } },
{ entity_id: 'sensor.humidity', state: '45', attributes: { friendly_name: 'Living Room Humidity', unit_of_measurement: '%' } }
],
climate: [
{ entity_id: 'climate.thermostat', state: 'heat', attributes: { friendly_name: 'Main Thermostat', current_temperature: 20, target_temp_high: 24 } }
]
}
};
interface HassState {
entity_id: string;
state: string;
attributes: Record<string, any>;
last_changed: string;
last_updated: string;
}
interface ServiceInfo {
name: string;
description: string;
fields: Record<string, any>;
}
interface ServiceDomain {
domain: string;
services: Record<string, ServiceInfo>;
}
/**
* Collects comprehensive information about the Home Assistant instance using MCP tools
*/
async function collectHomeAssistantInfo(): Promise<any> {
const info: Record<string, any> = {};
const config = loadConfig();
const hassHost = process.env.HASS_HOST;
// Update schema endpoint to be consistent
const schemaResponse = await fetch(`${config.mcpServer}/mcp`, {
headers: {
'Authorization': `Bearer ${hassToken}`,
'Accept': 'application/json'
}
});
if (!schemaResponse.ok) {
console.error(`Failed to fetch MCP schema: ${schemaResponse.status}`);
return info;
}
const schema = await schemaResponse.json() as McpSchema;
console.log("Available tools:", schema.tools.map(t => t.name));
// Execute list_devices to get basic device information
console.log("Fetching device information...");
try {
const deviceInfo = await executeMcpTool('list_devices');
if (deviceInfo && deviceInfo.success && deviceInfo.devices) {
info.devices = deviceInfo.devices;
} else {
console.warn(`Failed to list devices: ${deviceInfo?.message || 'Unknown error'}`);
// Check if we're in test mode
if (process.env.HA_TEST_MODE === '1') {
logger.info("Running in test mode with mock data");
return MOCK_HA_INFO;
}
} catch (error) {
console.warn("Error fetching devices:", error);
}
return info;
// Get states from Home Assistant directly
const statesResponse = await fetch(`${hassHost}/api/states`, {
headers: {
'Authorization': `Bearer ${hassToken}`,
'Content-Type': 'application/json'
}
});
if (!statesResponse.ok) {
throw new Error(`Failed to fetch states: ${statesResponse.status}`);
}
const states = await statesResponse.json() as HassState[];
// Group devices by domain
const devices: Record<string, HassState[]> = {};
for (const state of states) {
const [domain] = state.entity_id.split('.');
if (!devices[domain]) {
devices[domain] = [];
}
devices[domain].push(state);
}
info.devices = devices;
info.device_summary = {
total_devices: states.length,
device_types: Object.keys(devices),
by_domain: Object.fromEntries(
Object.entries(devices).map(([domain, items]) => [domain, items.length])
)
};
const deviceCount = states.length;
const domainCount = Object.keys(devices).length;
if (deviceCount > 0) {
logger.success(`Found ${deviceCount} devices across ${domainCount} domains`);
} else {
logger.warn('No devices found in Home Assistant');
}
return info;
} catch (error) {
logger.error(`Error fetching devices: ${error instanceof Error ? error.message : 'Unknown error'}`);
if (process.env.HA_TEST_MODE !== '1') {
logger.warn(`Failed to connect to Home Assistant. Run with HA_TEST_MODE=1 to use test data.`);
return {
devices: {},
device_summary: {
total_devices: 0,
device_types: [],
by_domain: {}
}
};
}
return MOCK_HA_INFO;
}
}
/**
@@ -401,31 +466,66 @@ function getRelevantDeviceTypes(prompt: string): string[] {
* Generates analysis and recommendations using the OpenAI API based on the Home Assistant data
*/
async function generateAnalysis(haInfo: any): Promise<SystemAnalysis> {
const openai = getOpenAIClient();
const config = loadConfig();
// Compress and summarize the data
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
const deviceSummary = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, any>, [domain, devices]) => {
const deviceList = devices as any[];
acc[domain] = {
count: deviceList.length,
active: deviceList.filter(d => d.state === 'on' || d.state === 'home').length,
states: [...new Set(deviceList.map(d => d.state))],
sample: deviceList.slice(0, 2).map(d => ({
id: d.entity_id,
state: d.state,
name: d.attributes?.friendly_name
}))
// If in test mode, return mock analysis
if (process.env.HA_TEST_MODE === '1') {
logger.info("Generating mock analysis...");
return {
overview: {
state: ["System running normally", "4 device types detected"],
health: ["All systems operational", "No critical issues found"],
configurations: ["Basic configuration detected", "Default settings in use"],
integrations: ["Light", "Switch", "Sensor", "Climate"],
issues: ["No major issues detected"]
},
performance: {
resource_usage: ["Normal CPU usage", "Memory usage within limits"],
response_times: ["Average response time: 0.5s"],
optimization_areas: ["Consider grouping lights by room"]
},
security: {
current_measures: ["Basic security measures in place"],
vulnerabilities: ["No critical vulnerabilities detected"],
recommendations: ["Enable 2FA if not already enabled"]
},
optimization: {
performance_suggestions: ["Group frequently used devices"],
config_optimizations: ["Consider creating room-based views"],
integration_improvements: ["Add friendly names to all entities"],
automation_opportunities: ["Create morning/evening routines"]
},
maintenance: {
required_updates: ["No critical updates pending"],
cleanup_tasks: ["Remove unused entities"],
regular_tasks: ["Check sensor battery levels"]
},
entity_usage: {
most_active: ["light.living_room", "sensor.temperature"],
rarely_used: ["switch.tv"],
potential_duplicates: []
},
automation_analysis: {
inefficient_automations: [],
potential_improvements: ["Add time-based light controls"],
suggested_blueprints: ["Motion-activated lighting"],
condition_optimizations: []
},
energy_management: {
high_consumption: ["No high consumption devices detected"],
monitoring_suggestions: ["Add power monitoring to main appliances"],
tariff_optimizations: ["Consider time-of-use automation"]
}
};
return acc;
}, {}) : {};
}
// Original analysis code for non-test mode
const openai = getOpenAIClient();
const systemSummary = {
total_devices: deviceTypes.reduce((sum, type) => sum + deviceSummary[type].count, 0),
device_types: deviceTypes,
device_summary: deviceSummary,
active_devices: Object.values(deviceSummary).reduce((sum: number, info: any) => sum + info.active, 0)
total_devices: haInfo.device_summary?.total_devices || 0,
device_types: haInfo.device_summary?.device_types || [],
device_summary: haInfo.device_summary?.by_domain || {}
};
const prompt = `Analyze this Home Assistant system and provide insights in XML format:
@@ -578,100 +678,92 @@ Generate your response in this EXACT format:
}
}
async function getUserInput(question: string): Promise<string> {
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
});
return new Promise((resolve) => {
rl.question(question, (answer) => {
rl.close();
resolve(answer);
});
});
interface AutomationConfig {
id?: string;
alias?: string;
description?: string;
trigger?: Array<{
platform: string;
[key: string]: any;
}>;
condition?: Array<{
condition: string;
[key: string]: any;
}>;
action?: Array<{
service?: string;
[key: string]: any;
}>;
mode?: string;
}
// Update chunk size calculation
const MAX_CHARACTERS = 8000; // ~2000 tokens (4 chars/token)
// Update model handling in retry
async function handleCustomPrompt(haInfo: any): Promise<void> {
try {
// Add device metadata
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
acc[domain] = (devices as any[]).length;
return acc;
}, {}) : {};
const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
const userPrompt = await getUserInput("Enter your custom prompt: ");
if (!userPrompt) {
console.log("No prompt provided. Exiting...");
return;
}
const openai = getOpenAIClient();
const config = loadConfig();
const completion = await openai.chat.completions.create({
model: config.selectedModel.name,
messages: [
{
role: "system",
content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
Current system has ${totalDevices} devices across ${deviceTypes.length} types: ${JSON.stringify(deviceStates)}`
},
{ role: "user", content: userPrompt },
],
max_tokens: config.selectedModel.maxTokens,
temperature: 0.3,
});
console.log("\nAnalysis Results:\n");
console.log(completion.choices[0].message?.content || "No response generated");
} catch (error) {
console.error("Error processing custom prompt:", error);
// Retry with simplified prompt if there's an error
try {
const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
const openai = getOpenAIClient();
const config = loadConfig();
const retryCompletion = await openai.chat.completions.create({
model: config.selectedModel.name,
messages: [
{
role: "system",
content: "You are a Home Assistant expert. Provide a simple analysis of the system."
},
{ role: "user", content: retryPrompt },
],
max_tokens: config.selectedModel.maxTokens,
temperature: 0.3,
});
console.log("\nAnalysis Results:\n");
console.log(retryCompletion.choices[0].message?.content || "No response generated");
} catch (retryError) {
console.error("Error during retry:", retryError);
}
}
}
// Update automation handling
async function handleAutomationOptimization(haInfo: any): Promise<void> {
try {
const result = await executeMcpTool('automation', { action: 'list' });
if (!result?.success) {
logger.error(`Failed to retrieve automations: ${result?.message || 'Unknown error'}`);
return;
const hassHost = process.env.HASS_HOST;
// Get automations directly from Home Assistant
const automationsResponse = await fetch(`${hassHost}/api/states`, {
headers: {
'Authorization': `Bearer ${hassToken}`,
'Content-Type': 'application/json'
}
});
if (!automationsResponse.ok) {
throw new Error(`Failed to fetch automations: ${automationsResponse.status}`);
}
const automations = result.automations || [];
const states = await automationsResponse.json() as HassState[];
const automations = states.filter(state => state.entity_id.startsWith('automation.'));
// Get services to understand what actions are available
const servicesResponse = await fetch(`${hassHost}/api/services`, {
headers: {
'Authorization': `Bearer ${hassToken}`,
'Content-Type': 'application/json'
}
});
let availableServices: Record<string, any> = {};
if (servicesResponse.ok) {
const services = await servicesResponse.json() as ServiceDomain[];
availableServices = services.reduce((acc: Record<string, any>, service: ServiceDomain) => {
if (service.domain && service.services) {
acc[service.domain] = service.services;
}
return acc;
}, {});
logger.debug(`Retrieved services from ${Object.keys(availableServices).length} domains`);
}
// Enrich automation data with service information
const enrichedAutomations = automations.map(automation => {
const actions = automation.attributes?.action || [];
const enrichedActions = actions.map((action: any) => {
if (action.service) {
const [domain, service] = action.service.split('.');
const serviceInfo = availableServices[domain]?.[service];
return {
...action,
service_info: serviceInfo
};
}
return action;
});
return {
...automation,
config: {
id: automation.entity_id.split('.')[1],
alias: automation.attributes?.friendly_name,
trigger: automation.attributes?.trigger || [],
condition: automation.attributes?.condition || [],
action: enrichedActions,
mode: automation.attributes?.mode || 'single'
}
};
});
if (automations.length === 0) {
console.log(chalk.bold.underline("\nAutomation Optimization Report"));
console.log(chalk.yellow("No automations found in the system. Consider creating some automations to improve your Home Assistant experience."));
@@ -679,7 +771,7 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
}
logger.info(`Analyzing ${automations.length} automations...`);
const optimizationXml = await analyzeAutomations(automations);
const optimizationXml = await analyzeAutomations(enrichedAutomations);
const parser = new DOMParser();
const xmlDoc = parser.parseFromString(optimizationXml, "text/xml");
@@ -721,51 +813,85 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
}
}
// Add new automation optimization function
async function analyzeAutomations(automations: any[]): Promise<string> {
const openai = getOpenAIClient();
const config = loadConfig();
// Compress automation data by only including essential fields
const compressedAutomations = automations.map(automation => ({
id: automation.entity_id,
name: automation.attributes?.friendly_name || automation.entity_id,
state: automation.state,
last_triggered: automation.attributes?.last_triggered,
mode: automation.attributes?.mode,
trigger_count: automation.attributes?.trigger?.length || 0,
action_count: automation.attributes?.action?.length || 0
}));
// Create a more detailed summary of automations
const automationSummary = {
total: automations.length,
active: automations.filter(a => a.state === 'on').length,
by_type: automations.reduce((acc: Record<string, number>, auto) => {
const type = auto.attributes?.mode || 'single';
acc[type] = (acc[type] || 0) + 1;
return acc;
}, {}),
recently_triggered: automations.filter(a => {
const lastTriggered = a.attributes?.last_triggered;
if (!lastTriggered) return false;
const lastTriggerDate = new Date(lastTriggered);
const oneDayAgo = new Date();
oneDayAgo.setDate(oneDayAgo.getDate() - 1);
return lastTriggerDate > oneDayAgo;
}).length,
trigger_types: automations.reduce((acc: Record<string, number>, auto) => {
const triggers = auto.config?.trigger || [];
triggers.forEach((trigger: any) => {
const type = trigger.platform || 'unknown';
acc[type] = (acc[type] || 0) + 1;
});
return acc;
}, {}),
action_types: automations.reduce((acc: Record<string, number>, auto) => {
const actions = auto.config?.action || [];
actions.forEach((action: any) => {
const type = action.service?.split('.')[0] || 'unknown';
acc[type] = (acc[type] || 0) + 1;
});
return acc;
}, {}),
service_domains: Array.from(new Set(automations.flatMap(auto =>
(auto.config?.action || [])
.map((action: any) => action.service?.split('.')[0])
.filter(Boolean)
))).sort(),
names: automations.map(a => a.attributes?.friendly_name || a.entity_id.split('.')[1]).slice(0, 10)
};
const prompt = `Analyze these Home Assistant automations and provide optimization suggestions in XML format:
${JSON.stringify(compressedAutomations, null, 2)}
${JSON.stringify(automationSummary, null, 2)}
Key metrics:
- Total automations: ${automationSummary.total}
- Active automations: ${automationSummary.active}
- Recently triggered: ${automationSummary.recently_triggered}
- Automation modes: ${JSON.stringify(automationSummary.by_type)}
- Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
- Action types: ${JSON.stringify(automationSummary.action_types)}
- Service domains used: ${automationSummary.service_domains.join(', ')}
Generate your response in this EXACT format:
<analysis>
<findings>
<item>Finding 1</item>
<item>Finding 2</item>
<!-- Add more findings as needed -->
</findings>
<recommendations>
<item>Recommendation 1</item>
<item>Recommendation 2</item>
<!-- Add more recommendations as needed -->
</recommendations>
<blueprints>
<item>Blueprint suggestion 1</item>
<item>Blueprint suggestion 2</item>
<!-- Add more blueprint suggestions as needed -->
</blueprints>
</analysis>
If no optimizations are needed, return empty item lists but maintain the XML structure.
Focus on:
1. Identifying patterns and potential improvements
2. Suggesting energy-saving optimizations
1. Identifying patterns and potential improvements based on trigger and action types
2. Suggesting energy-saving optimizations based on the services being used
3. Recommending error handling improvements
4. Suggesting relevant blueprints`;
4. Suggesting relevant blueprints for common automation patterns
5. Analyzing the distribution of automation types and suggesting optimizations`;
try {
const completion = await openai.chat.completions.create({
@@ -773,12 +899,12 @@ Focus on:
messages: [
{
role: "system",
content: "You are a Home Assistant automation expert. Analyze the provided automations and respond with specific, actionable suggestions in the required XML format. If no optimizations are needed, return empty item lists but maintain the XML structure."
content: "You are a Home Assistant automation expert. Analyze the provided automation summary and respond with specific, actionable suggestions in the required XML format."
},
{ role: "user", content: prompt }
],
temperature: 0.2,
max_tokens: Math.min(config.selectedModel.maxTokens, 4000)
max_tokens: Math.min(config.selectedModel.maxTokens, 2048)
});
const response = completion.choices[0].message?.content || "";
@@ -819,62 +945,164 @@ Focus on:
}
}
// Update model selection prompt count dynamically
async function selectModel(): Promise<ModelConfig> {
console.log(chalk.bold.underline("\nAvailable Models:"));
AVAILABLE_MODELS.forEach((model, index) => {
console.log(
`${index + 1}. ${chalk.blue(model.name.padEnd(20))} ` +
`Context: ${chalk.yellow(model.contextWindow.toLocaleString().padStart(6))} tokens | ` +
`Max output: ${chalk.green(model.maxTokens.toLocaleString().padStart(5))} tokens`
);
});
// Add new handleCustomPrompt function
async function handleCustomPrompt(haInfo: any, customPrompt: string): Promise<void> {
try {
// Add device metadata
const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
acc[domain] = (devices as any[]).length;
return acc;
}, {}) : {};
const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
const maxOption = AVAILABLE_MODELS.length;
const choice = await getUserInput(`\nSelect model (1-${maxOption}): `);
const selectedIndex = parseInt(choice) - 1;
// Get automation information
const automations = haInfo.devices?.automation || [];
const automationDetails = automations.map((auto: any) => ({
name: auto.attributes?.friendly_name || auto.entity_id.split('.')[1],
state: auto.state,
last_triggered: auto.attributes?.last_triggered,
mode: auto.attributes?.mode,
triggers: auto.attributes?.trigger?.map((t: any) => ({
platform: t.platform,
...t
})) || [],
conditions: auto.attributes?.condition?.map((c: any) => ({
condition: c.condition,
...c
})) || [],
actions: auto.attributes?.action?.map((a: any) => ({
service: a.service,
...a
})) || []
}));
if (isNaN(selectedIndex) || selectedIndex < 0 || selectedIndex >= AVAILABLE_MODELS.length) {
console.log(chalk.yellow("Invalid selection, using default model"));
return AVAILABLE_MODELS[0];
}
const automationSummary = {
total: automations.length,
active: automations.filter((a: any) => a.state === 'on').length,
trigger_types: automations.reduce((acc: Record<string, number>, auto: any) => {
const triggers = auto.attributes?.trigger || [];
triggers.forEach((trigger: any) => {
const type = trigger.platform || 'unknown';
acc[type] = (acc[type] || 0) + 1;
});
return acc;
}, {}),
action_types: automations.reduce((acc: Record<string, number>, auto: any) => {
const actions = auto.attributes?.action || [];
actions.forEach((action: any) => {
const type = action.service?.split('.')[0] || 'unknown';
acc[type] = (acc[type] || 0) + 1;
});
return acc;
}, {}),
service_domains: Array.from(new Set(automations.flatMap((auto: any) =>
(auto.attributes?.action || [])
.map((action: any) => action.service?.split('.')[0])
.filter(Boolean)
))).sort()
};
const selectedModel = AVAILABLE_MODELS[selectedIndex];
// Create a summary of the devices
const deviceSummary = Object.entries(deviceStates)
.map(([domain, count]) => `${domain}: ${count}`)
.join(', ');
// Validate API keys for specific providers
if (selectedModel.name.startsWith('deepseek')) {
if (!process.env.DEEPSEEK_API_KEY) {
logger.error("DeepSeek models require DEEPSEEK_API_KEY in .env");
process.exit(1);
if (process.env.HA_TEST_MODE === '1') {
console.log("\nTest Mode Analysis Results:\n");
console.log("Based on your Home Assistant setup with:");
console.log(`- ${totalDevices} total devices`);
console.log(`- Device types: ${deviceTypes.join(', ')}`);
console.log("\nAnalysis for prompt: " + customPrompt);
console.log("1. Current State:");
console.log(" - All devices are functioning normally");
console.log(" - System is responsive and stable");
console.log("\n2. Recommendations:");
console.log(" - Consider grouping devices by room");
console.log(" - Add automation for frequently used devices");
console.log(" - Monitor power usage of main appliances");
console.log("\n3. Optimization Opportunities:");
console.log(" - Create scenes for different times of day");
console.log(" - Set up presence detection for automatic control");
return;
}
// Verify DeepSeek connection
const openai = getOpenAIClient();
const config = loadConfig();
const completion = await openai.chat.completions.create({
model: config.selectedModel.name,
messages: [
{
role: "system",
content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
Current system has ${totalDevices} devices across ${deviceTypes.length} types.
Device distribution: ${deviceSummary}
Automation Summary:
- Total automations: ${automationSummary.total}
- Active automations: ${automationSummary.active}
- Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
- Action types: ${JSON.stringify(automationSummary.action_types)}
- Service domains used: ${automationSummary.service_domains.join(', ')}
Detailed Automation List:
${JSON.stringify(automationDetails, null, 2)}`
},
{ role: "user", content: customPrompt },
],
max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
temperature: 0.3,
});
console.log("\nAnalysis Results:\n");
console.log(completion.choices[0].message?.content || "No response generated");
} catch (error) {
console.error("Error processing custom prompt:", error);
if (process.env.HA_TEST_MODE === '1') {
console.log("\nTest Mode Fallback Analysis:\n");
console.log("1. System Overview:");
console.log(" - Basic configuration detected");
console.log(" - All core services operational");
console.log("\n2. Suggestions:");
console.log(" - Review device naming conventions");
console.log(" - Consider adding automation blueprints");
return;
}
// Retry with simplified prompt if there's an error
try {
await getOpenAIClient().models.list();
} catch (error) {
logger.error(`DeepSeek connection failed: ${error.message}`);
process.exit(1);
const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
const openai = getOpenAIClient();
const config = loadConfig();
const retryCompletion = await openai.chat.completions.create({
model: config.selectedModel.name,
messages: [
{
role: "system",
content: "You are a Home Assistant expert. Provide a simple analysis of the system."
},
{ role: "user", content: retryPrompt },
],
max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
temperature: 0.3,
});
console.log("\nAnalysis Results:\n");
console.log(retryCompletion.choices[0].message?.content || "No response generated");
} catch (retryError) {
console.error("Error during retry:", retryError);
}
}
if (selectedModel.name.startsWith('gpt-4-o') && !process.env.OPENAI_API_KEY) {
logger.error("OpenAI models require OPENAI_API_KEY in .env");
process.exit(1);
}
return selectedModel;
}
// Enhanced main function with progress indicators
async function main() {
let config = loadConfig();
// Model selection
config.selectedModel = await selectModel();
logger.info(`Selected model: ${chalk.blue(config.selectedModel.name)} ` +
`(Context: ${config.selectedModel.contextWindow.toLocaleString()} tokens, ` +
`Output: ${config.selectedModel.maxTokens.toLocaleString()} tokens)`);
logger.info(`Starting analysis with ${config.selectedModel.name} model...`);
try {
@@ -888,12 +1116,20 @@ async function main() {
logger.success(`Collected data from ${Object.keys(haInfo.devices).length} device types`);
const mode = await getUserInput(
"\nSelect mode:\n1. Standard Analysis\n2. Custom Prompt\n3. Automation Optimization\nEnter choice (1-3): "
);
// Get mode from command line argument or default to 1
const mode = process.argv[2] || "1";
console.log("\nAvailable modes:");
console.log("1. Standard Analysis");
console.log("2. Custom Prompt");
console.log("3. Automation Optimization");
console.log(`Selected mode: ${mode}\n`);
if (mode === "2") {
await handleCustomPrompt(haInfo);
// For custom prompt mode, get the prompt from remaining arguments
const customPrompt = process.argv.slice(3).join(" ") || "Analyze my Home Assistant setup";
console.log(`Custom prompt: ${customPrompt}\n`);
await handleCustomPrompt(haInfo, customPrompt);
} else if (mode === "3") {
await handleAutomationOptimization(haInfo);
} else {
@@ -938,22 +1174,39 @@ function getItems(xmlDoc: Document, path: string): string[] {
.map(item => (item as Element).textContent || "");
}
// Add environment check for processor type
// Replace the Express server initialization at the bottom with Bun's server
if (process.env.PROCESSOR_TYPE === 'openai') {
// Initialize Express server only for OpenAI
const app = express();
const port = process.env.PORT || 3000;
// Initialize Bun server for OpenAI
const server = Bun.serve({
port: process.env.PORT || 3000,
async fetch(req) {
const url = new URL(req.url);
app.use(bodyParser.json());
// Handle chat endpoint
if (url.pathname === '/chat' && req.method === 'POST') {
try {
const body = await req.json();
// Handle chat logic here
return new Response(JSON.stringify({ success: true }), {
headers: { 'Content-Type': 'application/json' }
});
} catch (error) {
return new Response(JSON.stringify({
success: false,
error: error.message
}), {
status: 400,
headers: { 'Content-Type': 'application/json' }
});
}
}
// Keep existing OpenAI routes
app.post('/chat', async (req, res) => {
// ... existing OpenAI handler code ...
// Handle 404 for unknown routes
return new Response('Not Found', { status: 404 });
},
});
app.listen(port, () => {
console.log(`[OpenAI Server] Running on port ${port}`);
});
console.log(`[OpenAI Server] Running on port ${server.port}`);
} else {
console.log('[Claude Mode] Using stdio communication');
}

View File

@@ -1,169 +0,0 @@
site_name: Home Assistant MCP
site_url: https://jango-blockchained.github.io/homeassistant-mcp
repo_url: https://github.com/jango-blockchained/homeassistant-mcp
repo_name: jango-blockchained/homeassistant-mcp
edit_uri: edit/main/docs/
theme:
name: material
features:
- navigation.instant
- navigation.tracking
- navigation.sections
- navigation.expand
- navigation.indexes
- navigation.top
- toc.follow
- search.suggest
- search.highlight
- content.code.copy
- content.code.annotate
palette:
- scheme: default
primary: indigo
accent: indigo
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
primary: indigo
accent: indigo
toggle:
icon: material/brightness-4
name: Switch to light mode
icon:
repo: fontawesome/brands/github
favicon: assets/favicon.png
logo: assets/logo.png
plugins:
- search
- mermaid2
- git-revision-date-localized:
type: date
- minify:
minify_html: true
markdown_extensions:
- admonition
- attr_list
- def_list
- footnotes
- meta
- toc:
permalink: true
- pymdownx.arithmatex
- pymdownx.betterem:
smart_enable: all
- pymdownx.caret
- pymdownx.critic
- pymdownx.details
- pymdownx.emoji:
emoji_index: !!python/name:materialx.emoji.twemoji
emoji_generator: !!python/name:materialx.emoji.to_svg
- pymdownx.highlight
- pymdownx.inlinehilite
- pymdownx.keys
- pymdownx.mark
- pymdownx.smartsymbols
- pymdownx.superfences:
custom_fences:
- name: mermaid
class: mermaid
format: !!python/name:pymdownx.superfences.fence_code_format
- pymdownx.tabbed:
alternate_style: true
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.tilde
nav:
- Home: index.md
- Getting Started:
- Quick Start: getting-started/quick-start.md
- Installation:
- Basic Setup: getting-started/installation.md
- Docker Setup: getting-started/docker.md
- GPU Support: getting-started/gpu.md
- Configuration:
- Environment: getting-started/configuration.md
- Security: getting-started/security.md
- Performance: getting-started/performance.md
- Core Features:
- Overview: features/core-features.md
- Device Control: features/device-control.md
- Automation: features/automation.md
- Events & States: features/events-states.md
- Security: features/security.md
- AI Features:
- Overview: ai/overview.md
- NLP Integration: ai/nlp.md
- Custom Prompts: ai/prompts.md
- Model Configuration: ai/models.md
- Best Practices: ai/best-practices.md
- Speech Processing:
- Overview: speech/overview.md
- Wake Word Detection: speech/wake-word.md
- Speech-to-Text: speech/stt.md
- GPU Acceleration: speech/gpu.md
- Language Support: speech/languages.md
- Tools & Utilities:
- Overview: tools/overview.md
- Analyzer CLI:
- Installation: tools/analyzer/installation.md
- Usage: tools/analyzer/usage.md
- Configuration: tools/analyzer/config.md
- Examples: tools/analyzer/examples.md
- Speech Examples:
- Basic Usage: tools/speech/basic.md
- Advanced Features: tools/speech/advanced.md
- Troubleshooting: tools/speech/troubleshooting.md
- Claude Desktop:
- Setup: tools/claude/setup.md
- Integration: tools/claude/integration.md
- Configuration: tools/claude/config.md
- API Reference:
- Overview: api/overview.md
- REST API:
- Authentication: api/rest/auth.md
- Endpoints: api/rest/endpoints.md
- Examples: api/rest/examples.md
- WebSocket API:
- Connection: api/websocket/connection.md
- Events: api/websocket/events.md
- Examples: api/websocket/examples.md
- SSE:
- Setup: api/sse/setup.md
- Events: api/sse/events.md
- Examples: api/sse/examples.md
- Development:
- Setup: development/setup.md
- Architecture: development/architecture.md
- Contributing: development/contributing.md
- Testing:
- Overview: development/testing/overview.md
- Unit Tests: development/testing/unit.md
- Integration Tests: development/testing/integration.md
- E2E Tests: development/testing/e2e.md
- Guidelines:
- Code Style: development/guidelines/code-style.md
- Documentation: development/guidelines/documentation.md
- Git Workflow: development/guidelines/git-workflow.md
- Troubleshooting:
- Common Issues: troubleshooting/common-issues.md
- FAQ: troubleshooting/faq.md
- Known Bugs: troubleshooting/known-bugs.md
- Support: troubleshooting/support.md
- About:
- License: about/license.md
- Author: about/author.md
- Changelog: about/changelog.md
- Roadmap: about/roadmap.md

View File

@@ -21,7 +21,7 @@
"profile": "bun --inspect src/index.ts",
"clean": "rm -rf dist .bun coverage",
"typecheck": "bun x tsc --noEmit",
"example:speech": "bun run examples/speech-to-text-example.ts"
"example:speech": "bun run extra/speech-to-text-example.ts"
},
"dependencies": {
"@elysiajs/cors": "^1.2.0",
@@ -31,13 +31,14 @@
"@types/sanitize-html": "^2.9.5",
"@types/ws": "^8.5.10",
"@xmldom/xmldom": "^0.9.7",
"dotenv": "^16.4.5",
"chalk": "^5.4.1",
"dotenv": "^16.4.7",
"elysia": "^1.2.11",
"helmet": "^7.1.0",
"jsonwebtoken": "^9.0.2",
"node-fetch": "^3.3.2",
"node-record-lpcm16": "^1.0.1",
"openai": "^4.82.0",
"openai": "^4.83.0",
"sanitize-html": "^2.11.0",
"typescript": "^5.3.3",
"winston": "^3.11.0",

View File

@@ -115,7 +115,7 @@ router.get("/subscribe_events", middleware.wsRateLimiter, (req, res) => {
res.writeHead(200, {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
Connection: "keep-alive",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
});

View File

@@ -12,7 +12,7 @@ export const AppConfigSchema = z.object({
.default("development"),
/** Home Assistant Configuration */
HASS_HOST: z.string().default("http://192.168.178.63:8123"),
HASS_HOST: z.string().default("http://homeassistant.local:8123"),
HASS_TOKEN: z.string().optional(),
/** Speech Features Configuration */
@@ -31,7 +31,7 @@ export const AppConfigSchema = z.object({
}),
/** Security Configuration */
JWT_SECRET: z.string().default("your-secret-key"),
JWT_SECRET: z.string().default("your-secret-key-must-be-32-char-min"),
RATE_LIMIT: z.object({
/** Time window for rate limiting in milliseconds */
windowMs: z.number().default(15 * 60 * 1000), // 15 minutes
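A small, self-contained sketch of how the two changed defaults behave; it uses a reduced schema rather than the full `AppConfigSchema`, so the field subset is an assumption.
```typescript
// Hedged sketch: the new defaults from this diff in isolation.
import { z } from "zod";

const HostAndSecret = z.object({
  HASS_HOST: z.string().default("http://homeassistant.local:8123"),
  JWT_SECRET: z.string().default("your-secret-key-must-be-32-char-min"),
});

// With no overrides supplied, Zod fills in the defaults above.
console.log(HostAndSecret.parse({}));
// => { HASS_HOST: "http://homeassistant.local:8123",
//      JWT_SECRET: "your-secret-key-must-be-32-char-min" }
```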

View File

@@ -1,35 +0,0 @@
export const BOILERPLATE_CONFIG = {
configuration: {
LOG_LEVEL: {
type: "string" as const,
default: "debug",
description: "Logging level",
enum: ["error", "warn", "info", "debug", "trace"],
},
CACHE_DIRECTORY: {
type: "string" as const,
default: ".cache",
description: "Directory for cache files",
},
CONFIG_DIRECTORY: {
type: "string" as const,
default: ".config",
description: "Directory for configuration files",
},
DATA_DIRECTORY: {
type: "string" as const,
default: ".data",
description: "Directory for data files",
},
},
internal: {
boilerplate: {
configuration: {
LOG_LEVEL: "debug",
CACHE_DIRECTORY: ".cache",
CONFIG_DIRECTORY: ".config",
DATA_DIRECTORY: ".data",
},
},
},
};

View File

@@ -1,5 +1,5 @@
import { config as dotenvConfig } from "dotenv";
import fs from "fs";
import { file } from "bun";
import path from "path";
/**
@@ -15,7 +15,7 @@ const ENV_FILE_MAPPING: Record<string, string> = {
* Loads environment variables from the appropriate files based on NODE_ENV.
* First loads environment-specific file, then overrides with generic .env if it exists.
*/
export function loadEnvironmentVariables() {
export async function loadEnvironmentVariables() {
// Determine the current environment (default to 'development')
const nodeEnv = (process.env.NODE_ENV || "development").toLowerCase();
@@ -29,19 +29,29 @@ export function loadEnvironmentVariables() {
const envPath = path.resolve(process.cwd(), envFile);
// Load the environment-specific file if it exists
if (fs.existsSync(envPath)) {
dotenvConfig({ path: envPath });
console.log(`Loaded environment variables from ${envFile}`);
} else {
console.warn(`Environment-specific file ${envFile} not found.`);
try {
const envFileExists = await file(envPath).exists();
if (envFileExists) {
dotenvConfig({ path: envPath });
console.log(`Loaded environment variables from ${envFile}`);
} else {
console.warn(`Environment-specific file ${envFile} not found.`);
}
} catch (error) {
console.warn(`Error checking environment file ${envFile}:`, error);
}
// Finally, check if there is a generic .env file present
// If so, load it with the override option, so its values take precedence
const genericEnvPath = path.resolve(process.cwd(), ".env");
if (fs.existsSync(genericEnvPath)) {
dotenvConfig({ path: genericEnvPath, override: true });
console.log("Loaded and overrode with generic .env file");
try {
const genericEnvExists = await file(genericEnvPath).exists();
if (genericEnvExists) {
dotenvConfig({ path: genericEnvPath, override: true });
console.log("Loaded and overrode with generic .env file");
}
} catch (error) {
console.warn(`Error checking generic .env file:`, error);
}
}

View File

@@ -1,6 +1,4 @@
import "./polyfills.js";
import { config } from "dotenv";
import { resolve } from "path";
import { file } from "bun";
import { Elysia } from "elysia";
import { cors } from "@elysiajs/cors";
import { swagger } from "@elysiajs/swagger";
@@ -27,17 +25,23 @@ import {
} from "./commands.js";
import { speechService } from "./speech/index.js";
import { APP_CONFIG } from "./config/app.config.js";
import { loadEnvironmentVariables } from "./config/loadEnv.js";
import { MCP_SCHEMA } from "./mcp/schema.js";
import {
listDevicesTool,
controlTool,
subscribeEventsTool,
getSSEStatsTool,
automationConfigTool,
addonTool,
packageTool,
sceneTool,
notifyTool,
historyTool,
} from "./tools/index.js";
// Load environment variables based on NODE_ENV
const envFile =
process.env.NODE_ENV === "production"
? ".env"
: process.env.NODE_ENV === "test"
? ".env.test"
: ".env.development";
console.log(`Loading environment from ${envFile}`);
config({ path: resolve(process.cwd(), envFile) });
await loadEnvironmentVariables();
// Configuration
const HASS_TOKEN = process.env.HASS_TOKEN;
@@ -54,67 +58,18 @@ export interface Tool {
}
// Array to store tools
const tools: Tool[] = [];
// Define the list devices tool
const listDevicesTool: Tool = {
name: "list_devices",
description: "List all available Home Assistant devices",
parameters: z.object({}),
execute: async () => {
try {
const devices = await list_devices();
return {
success: true,
devices,
};
} catch (error) {
return {
success: false,
message:
error instanceof Error ? error.message : "Unknown error occurred",
};
}
},
};
// Add tools to the array
tools.push(listDevicesTool);
// Add the Home Assistant control tool
const controlTool: Tool = {
name: "control",
description: "Control Home Assistant devices and services",
parameters: z.object({
command: z.enum([
...commonCommands,
...coverCommands,
...climateCommands,
] as [string, ...string[]]),
entity_id: z.string().describe("The ID of the entity to control"),
}),
execute: async (params: { command: Command; entity_id: string }) => {
try {
const [domain] = params.entity_id.split(".");
await call_service(domain, params.command, {
entity_id: params.entity_id,
});
return {
success: true,
message: `Command ${params.command} executed successfully on ${params.entity_id}`,
};
} catch (error) {
return {
success: false,
message:
error instanceof Error ? error.message : "Unknown error occurred",
};
}
},
};
// Add the control tool to the array
tools.push(controlTool);
const tools: Tool[] = [
listDevicesTool,
controlTool,
subscribeEventsTool,
getSSEStatsTool,
automationConfigTool,
addonTool,
packageTool,
sceneTool,
notifyTool,
historyTool,
];
// Initialize Elysia app with middleware
const app = new Elysia()
@@ -126,11 +81,41 @@ const app = new Elysia()
.use(sanitizeInput)
.use(errorHandler);
// Health check endpoint
app.get("/health", () => ({
// Mount API routes
app.get("/api/mcp/schema", () => MCP_SCHEMA);
app.post("/api/mcp/execute", async ({ body }: { body: { name: string; parameters: Record<string, unknown> } }) => {
const { name: toolName, parameters } = body;
const tool = tools.find((t) => t.name === toolName);
if (!tool) {
return {
success: false,
message: `Tool '${toolName}' not found`,
};
}
try {
const result = await tool.execute(parameters);
return {
success: true,
result,
};
} catch (error) {
return {
success: false,
message: error instanceof Error ? error.message : "Unknown error occurred",
};
}
});
// Health check endpoint with MCP info
app.get("/api/mcp/health", () => ({
status: "ok",
timestamp: new Date().toISOString(),
version: "0.1.0",
version: "1.0.0",
mcp_version: "1.0",
supported_tools: tools.map(t => t.name),
speech_enabled: APP_CONFIG.SPEECH.ENABLED,
wake_word_enabled: APP_CONFIG.SPEECH.WAKE_WORD_ENABLED,
speech_to_text_enabled: APP_CONFIG.SPEECH.SPEECH_TO_TEXT_ENABLED,

View File

@@ -6,8 +6,26 @@ import { HassState } from "../types/index.js";
export const listDevicesTool: Tool = {
name: "list_devices",
description: "List all available Home Assistant devices",
parameters: z.object({}).describe("No parameters required"),
execute: async () => {
parameters: z.object({
domain: z.enum([
"light",
"climate",
"alarm_control_panel",
"cover",
"switch",
"contact",
"media_player",
"fan",
"lock",
"vacuum",
"scene",
"script",
"camera",
]).optional(),
area: z.string().optional(),
floor: z.string().optional(),
}).describe("Filter devices by domain, area, or floor"),
execute: async (params: { domain?: string; area?: string; floor?: string }) => {
try {
const response = await fetch(`${APP_CONFIG.HASS_HOST}/api/states`, {
headers: {
@@ -21,20 +39,87 @@ export const listDevicesTool: Tool = {
}
const states = (await response.json()) as HassState[];
const devices: Record<string, HassState[]> = {
light: states.filter(state => state.entity_id.startsWith('light.')),
climate: states.filter(state => state.entity_id.startsWith('climate.'))
let filteredStates = states;
// Apply filters
if (params.domain) {
filteredStates = filteredStates.filter(state => state.entity_id.startsWith(`${params.domain}.`));
}
if (params.area) {
filteredStates = filteredStates.filter(state => state.attributes?.area_id === params.area);
}
if (params.floor) {
filteredStates = filteredStates.filter(state => state.attributes?.floor === params.floor);
}
const devices: Record<string, HassState[]> = {};
// Group devices by domain
filteredStates.forEach(state => {
const [domain] = state.entity_id.split('.');
if (!devices[domain]) {
devices[domain] = [];
}
devices[domain].push(state);
});
// Calculate device statistics
const deviceStats = Object.entries(devices).map(([domain, entities]) => {
const activeStates = ['on', 'home', 'unlocked', 'open'];
const active = entities.filter(e => activeStates.includes(e.state)).length;
const uniqueStates = [...new Set(entities.map(e => e.state))];
return {
domain,
count: entities.length,
active,
inactive: entities.length - active,
states: uniqueStates,
sample: entities.slice(0, 2).map(e => ({
id: e.entity_id,
state: e.state,
name: e.attributes?.friendly_name || e.entity_id,
area: e.attributes?.area_id,
floor: e.attributes?.floor,
}))
};
});
const totalDevices = filteredStates.length;
const deviceTypes = Object.keys(devices);
const deviceSummary = {
total_devices: totalDevices,
device_types: deviceTypes,
by_domain: Object.fromEntries(
deviceStats.map(stat => [
stat.domain,
{
count: stat.count,
active: stat.active,
states: stat.states,
sample: stat.sample
}
])
)
};
return {
success: true,
devices,
device_summary: deviceSummary
};
} catch (error) {
console.error('Error in list devices tool:', error);
return {
success: false,
message:
error instanceof Error ? error.message : "Unknown error occurred",
message: error instanceof Error ? error.message : "Unknown error occurred",
devices: {},
device_summary: {
total_devices: 0,
device_types: [],
by_domain: {}
}
};
}
},
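A hedged sketch of exercising the extended tool directly; the import path is an assumption and the filter values are placeholders.
```typescript
// Hedged sketch: call the extended list_devices tool with the new filters.
import { listDevicesTool } from "./src/tools/index.js"; // path is an assumption

const result = (await listDevicesTool.execute({
  domain: "light",     // only entities whose ID starts with "light."
  area: "living_room", // matched against attributes.area_id
})) as { success: boolean; device_summary?: unknown; message?: string };

if (result.success) {
  console.log(result.device_summary);
} else {
  console.error(result.message);
}
```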

View File

@@ -1,66 +0,0 @@
import { afterEach, mock, expect } from "bun:test";
// Setup global mocks
global.fetch = mock(() => Promise.resolve(new Response()));
// Mock WebSocket
class MockWebSocket {
static CONNECTING = 0;
static OPEN = 1;
static CLOSING = 2;
static CLOSED = 3;
url: string;
readyState: number = MockWebSocket.CLOSED;
onopen: ((event: any) => void) | null = null;
onclose: ((event: any) => void) | null = null;
onmessage: ((event: any) => void) | null = null;
onerror: ((event: any) => void) | null = null;
constructor(url: string) {
this.url = url;
setTimeout(() => {
this.readyState = MockWebSocket.OPEN;
this.onopen?.({ type: 'open' });
}, 0);
}
send = mock((data: string) => {
if (this.readyState !== MockWebSocket.OPEN) {
throw new Error('WebSocket is not open');
}
});
close = mock(() => {
this.readyState = MockWebSocket.CLOSED;
this.onclose?.({ type: 'close', code: 1000, reason: '', wasClean: true });
});
}
// Add WebSocket to global
(global as any).WebSocket = MockWebSocket;
// Reset all mocks after each test
afterEach(() => {
mock.restore();
});
// Add custom matchers
expect.extend({
toBeValidResponse(received: Response) {
const pass = received instanceof Response && received.ok;
return {
message: () =>
`expected ${received instanceof Response ? 'Response' : typeof received} to${pass ? ' not' : ''} be a valid Response`,
pass
};
},
toBeValidWebSocket(received: any) {
const pass = received instanceof MockWebSocket;
return {
message: () =>
`expected ${received instanceof MockWebSocket ? 'MockWebSocket' : typeof received} to${pass ? ' not' : ''} be a valid WebSocket`,
pass
};
}
});