feat: Enhance speech and AI configuration with advanced environment settings

- Update `.env.example` with comprehensive speech and AI configuration options
- Modify Docker Compose speech configuration for more flexible audio and ASR settings
- Enhance Dockerfile to support Python virtual environment and speech dependencies
- Refactor environment loading to use Bun's file system utilities
- Improve device listing tool with more detailed device statistics
- Add support for multiple AI models and dynamic configuration
jango-blockchained
2025-02-10 03:28:58 +01:00
parent 986b1949cd
commit b6bd53b01a
10 changed files with 764 additions and 283 deletions

View File

@@ -3,6 +3,7 @@ NODE_ENV=development
 PORT=3000
 DEBUG=false
 LOG_LEVEL=info
+MCP_SERVER=http://localhost:3000
 
 # Home Assistant Configuration
 HASS_HOST=http://homeassistant.local:8123
@@ -40,18 +41,18 @@ MAX_REQUEST_SIZE=1048576
 MAX_REQUEST_FIELDS=1000
 
 # AI Configuration
-PROCESSOR_TYPE=claude
+PROCESSOR_TYPE=openai
 OPENAI_API_KEY=your_openai_api_key
 OPENAI_MODEL=gpt-3.5-turbo
 MAX_RETRIES=3
 ANALYSIS_TIMEOUT=30000
 
 # Speech Features Configuration
-ENABLE_SPEECH_FEATURES=false
-ENABLE_WAKE_WORD=false
-ENABLE_SPEECH_TO_TEXT=false
+ENABLE_SPEECH_FEATURES=true
+ENABLE_WAKE_WORD=true
+ENABLE_SPEECH_TO_TEXT=true
 WHISPER_MODEL_PATH=/models
-WHISPER_MODEL_TYPE=tiny
+WHISPER_MODEL_TYPE=base
 
 # Audio Configuration
 NOISE_THRESHOLD=0.05
@@ -62,6 +63,13 @@ CHANNELS=1
 CHUNK_SIZE=1024
 PULSE_SERVER=unix:/run/user/1000/pulse/native
 
+# Whisper Configuration
+ASR_MODEL=base
+ASR_ENGINE=faster_whisper
+WHISPER_BEAM_SIZE=5
+COMPUTE_TYPE=float32
+LANGUAGE=en
+
 # SSE Configuration
 SSE_MAX_CLIENTS=50
 SSE_RECONNECT_TIMEOUT=5000
@@ -78,5 +86,11 @@ TEST_PORT=3001
 # Version
 VERSION=0.1.0
 
-# Advanced (Docker)
+# Docker Configuration
 COMPOSE_PROJECT_NAME=mcp
+
+# Resource Limits
+FAST_WHISPER_CPU_LIMIT=4.0
+FAST_WHISPER_MEMORY_LIMIT=2G
+MCP_CPU_LIMIT=1.0
+MCP_MEMORY_LIMIT=512M
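For context, a minimal sketch of how the server side might read these settings (illustrative only; the actual config module is not part of this commit, and the names simply mirror .env.example):

// Sketch: parse the speech-related variables with the defaults shown above.
const speechConfig = {
    enabled: (process.env.ENABLE_SPEECH_FEATURES ?? "true") === "true",
    wakeWord: (process.env.ENABLE_WAKE_WORD ?? "true") === "true",
    speechToText: (process.env.ENABLE_SPEECH_TO_TEXT ?? "true") === "true",
    whisperModel: process.env.WHISPER_MODEL_TYPE ?? "base",
    sampleRate: parseInt(process.env.SAMPLE_RATE ?? "16000", 10),
    mcpServer: process.env.MCP_SERVER ?? "http://localhost:3000",
};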

View File

@@ -11,10 +11,33 @@ RUN npm install -g bun@1.0.25
 RUN apt-get update && apt-get install -y --no-install-recommends \
     ca-certificates \
     curl \
+    pulseaudio \
+    alsa-utils \
+    python3-full \
+    python3-pip \
+    python3-dev \
+    python3-venv \
+    portaudio19-dev \
     && rm -rf /var/lib/apt/lists/* \
     && apt-get clean \
     && rm -rf /var/cache/apt/*
 
+# Create and activate virtual environment
+RUN python3 -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+ENV VIRTUAL_ENV="/opt/venv"
+
+# Upgrade pip in virtual environment
+RUN /opt/venv/bin/python -m pip install --upgrade pip
+
+# Install Python packages in virtual environment
+RUN /opt/venv/bin/python -m pip install --no-cache-dir \
+    numpy \
+    sounddevice \
+    openwakeword \
+    faster-whisper \
+    requests
+
 # Set build-time environment variables
 ENV NODE_ENV=production \
     NODE_OPTIONS="--max-old-space-size=2048" \
@@ -38,23 +61,69 @@ FROM node:20-slim as runner
 # Install bun in production image
 RUN npm install -g bun@1.0.25
 
+# Install runtime dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    pulseaudio \
+    alsa-utils \
+    libasound2 \
+    libasound2-plugins \
+    python3-full \
+    python3-pip \
+    python3-dev \
+    python3-venv \
+    portaudio19-dev \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt-get clean \
+    && rm -rf /var/cache/apt/*
+
+# Configure ALSA
+COPY docker/speech/asound.conf /etc/asound.conf
+
+# Create and activate virtual environment
+RUN python3 -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+ENV VIRTUAL_ENV="/opt/venv"
+
+# Upgrade pip in virtual environment
+RUN /opt/venv/bin/python -m pip install --upgrade pip
+
+# Install Python packages in virtual environment
+RUN /opt/venv/bin/python -m pip install --no-cache-dir \
+    numpy \
+    sounddevice \
+    openwakeword \
+    faster-whisper \
+    requests
+
+# Set Python path to use virtual environment
+ENV PYTHONPATH="/opt/venv/lib/python3.11/site-packages:$PYTHONPATH"
+
 # Set production environment variables
 ENV NODE_ENV=production \
     NODE_OPTIONS="--max-old-space-size=1024"
 
-# Create a non-root user
+# Create a non-root user and add to audio group
 RUN addgroup --system --gid 1001 nodejs && \
-    adduser --system --uid 1001 bunjs
+    adduser --system --uid 1001 --gid 1001 bunjs && \
+    adduser bunjs audio
 
 WORKDIR /app
 
+# Copy Python virtual environment from builder
+COPY --from=builder --chown=bunjs:nodejs /opt/venv /opt/venv
+
+# Copy source files
+COPY --chown=bunjs:nodejs . .
+
 # Copy only the necessary files from builder
 COPY --from=builder --chown=bunjs:nodejs /app/dist ./dist
 COPY --from=builder --chown=bunjs:nodejs /app/node_modules ./node_modules
-COPY --chown=bunjs:nodejs package.json ./
 
-# Create logs directory with proper permissions
-RUN mkdir -p /app/logs && chown -R bunjs:nodejs /app/logs
+# Ensure audio setup script is executable
+RUN chmod +x /app/docker/speech/setup-audio.sh
+
+# Create logs and audio directories with proper permissions
+RUN mkdir -p /app/logs /app/audio && chown -R bunjs:nodejs /app/logs /app/audio
 
 # Switch to non-root user
 USER bunjs
@@ -66,5 +135,5 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
 # Expose port
 EXPOSE ${PORT:-4000}
 
-# Start the application with optimized flags
-CMD ["bun", "--smol", "run", "start"]
+# Start the application with audio setup
+CMD ["/bin/bash", "-c", "/app/docker/speech/setup-audio.sh & bun --smol run start"]

View File

@@ -4,17 +4,27 @@ services:
   homeassistant-mcp:
     image: homeassistant-mcp:latest
     environment:
+      # Speech Feature Flags
       - ENABLE_SPEECH_FEATURES=${ENABLE_SPEECH_FEATURES:-true}
       - ENABLE_WAKE_WORD=${ENABLE_WAKE_WORD:-true}
       - ENABLE_SPEECH_TO_TEXT=${ENABLE_SPEECH_TO_TEXT:-true}
+      # Audio Configuration
+      - NOISE_THRESHOLD=${NOISE_THRESHOLD:-0.05}
+      - MIN_SPEECH_DURATION=${MIN_SPEECH_DURATION:-1.0}
+      - SILENCE_DURATION=${SILENCE_DURATION:-0.5}
+      - SAMPLE_RATE=${SAMPLE_RATE:-16000}
+      - CHANNELS=${CHANNELS:-1}
+      - CHUNK_SIZE=${CHUNK_SIZE:-1024}
+      - PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
 
   fast-whisper:
     image: onerahmet/openai-whisper-asr-webservice:latest
     volumes:
       - whisper-models:/models
       - audio-data:/audio
     environment:
-      - ASR_MODEL=base
+      - ASR_MODEL=${WHISPER_MODEL_TYPE:-base}
       - ASR_ENGINE=faster_whisper
       - WHISPER_BEAM_SIZE=5
       - COMPUTE_TYPE=float32
@@ -27,7 +37,7 @@ services:
           cpus: '4.0'
           memory: 2G
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://localhost:9000/asr/health" ]
+      test: [ "CMD", "curl", "-f", "http://localhost:9000/health" ]
       interval: 30s
       timeout: 10s
       retries: 3
@@ -40,10 +50,23 @@ services:
     volumes:
       - /run/user/1000/pulse/native:/run/user/1000/pulse/native
     environment:
-      - PULSE_SERVER=unix:/run/user/1000/pulse/native
+      - PULSE_SERVER=${PULSE_SERVER:-unix:/run/user/1000/pulse/native}
+      - PULSE_COOKIE=/run/user/1000/pulse/cookie
+      - PYTHONUNBUFFERED=1
+      - OPENWAKEWORD_MODEL=hey_jarvis
+      - OPENWAKEWORD_THRESHOLD=0.5
+      - MICROPHONE_COMMAND=arecord -D hw:0,0 -f S16_LE -c 1 -r 16000 -t raw
     group_add:
-      - audio
+      - "${AUDIO_GID:-29}"
     network_mode: host
+    privileged: true
+    entrypoint: >
+      /bin/bash -c "apt-get update && apt-get install -y pulseaudio alsa-utils && rm -rf /var/lib/apt/lists/* && /run.sh"
+    healthcheck:
+      test: [ "CMD-SHELL", "pactl info > /dev/null 2>&1 || exit 1" ]
+      interval: 30s
+      timeout: 10s
+      retries: 3
 
 volumes:
   whisper-models:
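A hedged usage sketch against the fast-whisper service above; the /asr route and its audio_file form field follow the onerahmet/openai-whisper-asr-webservice conventions, so verify both against the image's /docs page:

// Sketch: transcribe a WAV file through the ASR webservice.
const form = new FormData();
form.append("audio_file", Bun.file("./audio/sample.wav")); // path is hypothetical
const res = await fetch(
    "http://localhost:9000/asr?task=transcribe&language=en&output=json",
    { method: "POST", body: form }
);
console.log(await res.json());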

docker/speech/asound.conf Normal file
View File

@@ -0,0 +1,35 @@
+pcm.!default {
+    type pulse
+    fallback "sysdefault"
+    hint {
+        show on
+        description "Default ALSA Output (currently PulseAudio Sound Server)"
+    }
+}
+
+ctl.!default {
+    type pulse
+    fallback "sysdefault"
+}
+
+# Use PulseAudio by default
+pcm.pulse {
+    type pulse
+}
+
+ctl.pulse {
+    type pulse
+}
+
+# Explicit device for recording
+pcm.microphone {
+    type hw
+    card 0
+    device 0
+}
+
+# Default capture device
+pcm.!default {
+    type pulse
+    hint.description "Default Audio Device"
+}

View File

@@ -30,6 +30,9 @@ MAX_MODEL_LOAD_RETRIES = 3
 MODEL_LOAD_RETRY_DELAY = 5  # seconds
 MODEL_DOWNLOAD_TIMEOUT = 600  # 10 minutes timeout for model download
 
+# ALSA device configuration
+AUDIO_DEVICE = 'hw:0,0'  # Use ALSA hardware device directly
+
 # Audio processing parameters
 NOISE_THRESHOLD = 0.08  # Increased threshold for better noise filtering
 MIN_SPEECH_DURATION = 2.0  # Longer minimum duration to avoid fragments
@@ -44,7 +47,7 @@ WAKE_WORD_ENABLED = os.environ.get('ENABLE_WAKE_WORD', 'false').lower() == 'true'
 SPEECH_ENABLED = os.environ.get('ENABLE_SPEECH_FEATURES', 'true').lower() == 'true'
 
 # Wake word models to use (only if wake word is enabled)
-WAKE_WORDS = ["alexa"]  # Using 'alexa' as temporary replacement for 'gaja'
+WAKE_WORDS = ["hey_jarvis"]  # Using hey_jarvis as it's more similar to "hey gaja"
 WAKE_WORD_ALIAS = "gaja"  # What we print when wake word is detected
 
 # Home Assistant Configuration
@@ -235,7 +238,22 @@ class AudioProcessor:
         self.buffer = np.zeros(SAMPLE_RATE * BUFFER_DURATION)
         self.buffer_lock = threading.Lock()
         self.last_transcription_time = 0
+        self.stream = None
+        try:
+            logger.info(f"Opening audio device: {AUDIO_DEVICE}")
+            self.stream = sd.InputStream(
+                device=AUDIO_DEVICE,
+                samplerate=SAMPLE_RATE,
+                channels=CHANNELS,
+                dtype=np.int16,
+                blocksize=CHUNK_SIZE,
+                callback=self._audio_callback
+            )
+            logger.info("Audio stream initialized successfully")
+        except Exception as e:
+            logger.error(f"Failed to initialize audio stream: {e}")
+            raise
         self.speech_detected = False
         self.silence_frames = 0
         self.speech_frames = 0
@@ -272,7 +290,7 @@ class AudioProcessor:
             return True
         return False
 
-    def audio_callback(self, indata, frames, time, status):
+    def _audio_callback(self, indata, frames, time, status):
         """Callback for audio input"""
         if status:
             logger.warning(f"Audio callback status: {status}")
@@ -382,7 +400,7 @@ class AudioProcessor:
                 channels=CHANNELS,
                 samplerate=SAMPLE_RATE,
                 blocksize=CHUNK_SIZE,
-                callback=self.audio_callback
+                callback=self._audio_callback
             ):
                 logger.info("Audio input stream started successfully")
                 logger.info("Listening for audio input...")

View File

@@ -4,8 +4,6 @@ import { DOMParser, Element, Document } from '@xmldom/xmldom';
 import dotenv from 'dotenv';
 import readline from 'readline';
 import chalk from 'chalk';
-import express from 'express';
-import bodyParser from 'body-parser';
 
 // Load environment variables
 dotenv.config();
@@ -118,9 +116,8 @@ interface ModelConfig {
 // Update model listing to filter based on API key availability
 const AVAILABLE_MODELS: ModelConfig[] = [
     // OpenAI models always available
-    { name: 'gpt-4o', maxTokens: 4096, contextWindow: 128000 },
-    { name: 'gpt-4-turbo', maxTokens: 4096, contextWindow: 128000 },
-    { name: 'gpt-4', maxTokens: 8192, contextWindow: 128000 },
+    { name: 'gpt-4', maxTokens: 8192, contextWindow: 8192 },
+    { name: 'gpt-4-turbo-preview', maxTokens: 4096, contextWindow: 128000 },
     { name: 'gpt-3.5-turbo', maxTokens: 4096, contextWindow: 16385 },
     { name: 'gpt-3.5-turbo-16k', maxTokens: 16385, contextWindow: 16385 },
@@ -151,18 +148,12 @@ const logger = {
 // Update default model selection in loadConfig
 function loadConfig(): AppConfig {
-    // Use environment variable or default to gpt-4o
-    const defaultModelName = process.env.OPENAI_MODEL || 'gpt-4o';
-    let defaultModel = AVAILABLE_MODELS.find(m => m.name === defaultModelName);
-
-    // If the configured model isn't found, use gpt-4o without warning
-    if (!defaultModel) {
-        defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4o') || AVAILABLE_MODELS[0];
-    }
+    // Always use gpt-4 for now
+    const defaultModel = AVAILABLE_MODELS.find(m => m.name === 'gpt-4') || AVAILABLE_MODELS[0];
 
     return {
         mcpServer: process.env.MCP_SERVER || 'http://localhost:3000',
-        openaiModel: defaultModel.name, // Use the resolved model name
+        openaiModel: defaultModel.name,
         maxRetries: parseInt(process.env.MAX_RETRIES || '3'),
         analysisTimeout: parseInt(process.env.ANALYSIS_TIMEOUT || '30000'),
         selectedModel: defaultModel
@@ -194,8 +185,8 @@ async function executeMcpTool(toolName: string, parameters: Record<string, any>
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), config.analysisTimeout);
 
-        // Update endpoint URL to use the same base path as schema
-        const endpoint = `${config.mcpServer}/mcp/execute`;
+        // Update endpoint URL to use the correct API path
+        const endpoint = `${config.mcpServer}/api/mcp/execute`;
 
         const response = await fetch(endpoint, {
             method: "POST",
@@ -258,43 +249,117 @@ function isMcpExecuteResponse(obj: any): obj is McpExecuteResponse {
         (obj.success === true || typeof obj.message === 'string');
 }
 
+// Add mock data for testing
+const MOCK_HA_INFO = {
+    devices: {
+        light: [
+            { entity_id: 'light.living_room', state: 'on', attributes: { friendly_name: 'Living Room Light', brightness: 255 } },
+            { entity_id: 'light.kitchen', state: 'off', attributes: { friendly_name: 'Kitchen Light', brightness: 0 } }
+        ],
+        switch: [
+            { entity_id: 'switch.tv', state: 'off', attributes: { friendly_name: 'TV Power' } }
+        ],
+        sensor: [
+            { entity_id: 'sensor.temperature', state: '21.5', attributes: { friendly_name: 'Living Room Temperature', unit_of_measurement: '°C' } },
+            { entity_id: 'sensor.humidity', state: '45', attributes: { friendly_name: 'Living Room Humidity', unit_of_measurement: '%' } }
+        ],
+        climate: [
+            { entity_id: 'climate.thermostat', state: 'heat', attributes: { friendly_name: 'Main Thermostat', current_temperature: 20, target_temp_high: 24 } }
+        ]
+    }
+};
+
+interface HassState {
+    entity_id: string;
+    state: string;
+    attributes: Record<string, any>;
+    last_changed: string;
+    last_updated: string;
+}
+
+interface ServiceInfo {
+    name: string;
+    description: string;
+    fields: Record<string, any>;
+}
+
+interface ServiceDomain {
+    domain: string;
+    services: Record<string, ServiceInfo>;
+}
+
 /**
  * Collects comprehensive information about the Home Assistant instance using MCP tools
  */
 async function collectHomeAssistantInfo(): Promise<any> {
     const info: Record<string, any> = {};
-    const config = loadConfig();
-
-    // Update schema endpoint to be consistent
-    const schemaResponse = await fetch(`${config.mcpServer}/mcp`, {
-        headers: {
-            'Authorization': `Bearer ${hassToken}`,
-            'Accept': 'application/json'
-        }
-    });
-
-    if (!schemaResponse.ok) {
-        console.error(`Failed to fetch MCP schema: ${schemaResponse.status}`);
-        return info;
-    }
-
-    const schema = await schemaResponse.json() as McpSchema;
-    console.log("Available tools:", schema.tools.map(t => t.name));
-
-    // Execute list_devices to get basic device information
-    console.log("Fetching device information...");
-    try {
-        const deviceInfo = await executeMcpTool('list_devices');
-        if (deviceInfo && deviceInfo.success && deviceInfo.devices) {
-            info.devices = deviceInfo.devices;
-        } else {
-            console.warn(`Failed to list devices: ${deviceInfo?.message || 'Unknown error'}`);
-        }
-    } catch (error) {
-        console.warn("Error fetching devices:", error);
-    }
-
-    return info;
+    const hassHost = process.env.HASS_HOST;
+
+    try {
+        // Check if we're in test mode
+        if (process.env.HA_TEST_MODE === '1') {
+            logger.info("Running in test mode with mock data");
+            return MOCK_HA_INFO;
+        }
+
+        // Get states from Home Assistant directly
+        const statesResponse = await fetch(`${hassHost}/api/states`, {
+            headers: {
+                'Authorization': `Bearer ${hassToken}`,
+                'Content-Type': 'application/json'
+            }
+        });
+
+        if (!statesResponse.ok) {
+            throw new Error(`Failed to fetch states: ${statesResponse.status}`);
+        }
+
+        const states = await statesResponse.json() as HassState[];
+
+        // Group devices by domain
+        const devices: Record<string, HassState[]> = {};
+        for (const state of states) {
+            const [domain] = state.entity_id.split('.');
+            if (!devices[domain]) {
+                devices[domain] = [];
+            }
+            devices[domain].push(state);
+        }
+
+        info.devices = devices;
+        info.device_summary = {
+            total_devices: states.length,
+            device_types: Object.keys(devices),
+            by_domain: Object.fromEntries(
+                Object.entries(devices).map(([domain, items]) => [domain, items.length])
+            )
+        };
+
+        const deviceCount = states.length;
+        const domainCount = Object.keys(devices).length;
+        if (deviceCount > 0) {
+            logger.success(`Found ${deviceCount} devices across ${domainCount} domains`);
+        } else {
+            logger.warn('No devices found in Home Assistant');
+        }
+
+        return info;
+    } catch (error) {
+        logger.error(`Error fetching devices: ${error instanceof Error ? error.message : 'Unknown error'}`);
+        if (process.env.HA_TEST_MODE !== '1') {
+            logger.warn(`Failed to connect to Home Assistant. Run with HA_TEST_MODE=1 to use test data.`);
+            return {
+                devices: {},
+                device_summary: {
+                    total_devices: 0,
+                    device_types: [],
+                    by_domain: {}
+                }
+            };
+        }
+        return MOCK_HA_INFO;
+    }
 }
 
 /**
@@ -401,31 +466,66 @@ function getRelevantDeviceTypes(prompt: string): string[] {
  * Generates analysis and recommendations using the OpenAI API based on the Home Assistant data
  */
 async function generateAnalysis(haInfo: any): Promise<SystemAnalysis> {
-    const openai = getOpenAIClient();
     const config = loadConfig();
 
-    // Compress and summarize the data
-    const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
-    const deviceSummary = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, any>, [domain, devices]) => {
-        const deviceList = devices as any[];
-        acc[domain] = {
-            count: deviceList.length,
-            active: deviceList.filter(d => d.state === 'on' || d.state === 'home').length,
-            states: [...new Set(deviceList.map(d => d.state))],
-            sample: deviceList.slice(0, 2).map(d => ({
-                id: d.entity_id,
-                state: d.state,
-                name: d.attributes?.friendly_name
-            }))
-        };
-        return acc;
-    }, {}) : {};
+    // If in test mode, return mock analysis
+    if (process.env.HA_TEST_MODE === '1') {
+        logger.info("Generating mock analysis...");
+        return {
+            overview: {
+                state: ["System running normally", "4 device types detected"],
+                health: ["All systems operational", "No critical issues found"],
+                configurations: ["Basic configuration detected", "Default settings in use"],
+                integrations: ["Light", "Switch", "Sensor", "Climate"],
+                issues: ["No major issues detected"]
+            },
+            performance: {
+                resource_usage: ["Normal CPU usage", "Memory usage within limits"],
+                response_times: ["Average response time: 0.5s"],
+                optimization_areas: ["Consider grouping lights by room"]
+            },
+            security: {
+                current_measures: ["Basic security measures in place"],
+                vulnerabilities: ["No critical vulnerabilities detected"],
+                recommendations: ["Enable 2FA if not already enabled"]
+            },
+            optimization: {
+                performance_suggestions: ["Group frequently used devices"],
+                config_optimizations: ["Consider creating room-based views"],
+                integration_improvements: ["Add friendly names to all entities"],
+                automation_opportunities: ["Create morning/evening routines"]
+            },
+            maintenance: {
+                required_updates: ["No critical updates pending"],
+                cleanup_tasks: ["Remove unused entities"],
+                regular_tasks: ["Check sensor battery levels"]
+            },
+            entity_usage: {
+                most_active: ["light.living_room", "sensor.temperature"],
+                rarely_used: ["switch.tv"],
+                potential_duplicates: []
+            },
+            automation_analysis: {
+                inefficient_automations: [],
+                potential_improvements: ["Add time-based light controls"],
+                suggested_blueprints: ["Motion-activated lighting"],
+                condition_optimizations: []
+            },
+            energy_management: {
+                high_consumption: ["No high consumption devices detected"],
+                monitoring_suggestions: ["Add power monitoring to main appliances"],
+                tariff_optimizations: ["Consider time-of-use automation"]
+            }
+        };
+    }
+
+    // Original analysis code for non-test mode
+    const openai = getOpenAIClient();
 
     const systemSummary = {
-        total_devices: deviceTypes.reduce((sum, type) => sum + deviceSummary[type].count, 0),
-        device_types: deviceTypes,
-        device_summary: deviceSummary,
-        active_devices: Object.values(deviceSummary).reduce((sum: number, info: any) => sum + info.active, 0)
+        total_devices: haInfo.device_summary?.total_devices || 0,
+        device_types: haInfo.device_summary?.device_types || [],
+        device_summary: haInfo.device_summary?.by_domain || {}
     };
 
     const prompt = `Analyze this Home Assistant system and provide insights in XML format:
@@ -578,100 +678,92 @@ Generate your response in this EXACT format:
 }
 }
 
-async function getUserInput(question: string): Promise<string> {
-    const rl = readline.createInterface({
-        input: process.stdin,
-        output: process.stdout
-    });
-
-    return new Promise((resolve) => {
-        rl.question(question, (answer) => {
-            rl.close();
-            resolve(answer);
-        });
-    });
-}
-
-// Update chunk size calculation
-const MAX_CHARACTERS = 8000; // ~2000 tokens (4 chars/token)
-
-// Update model handling in retry
-async function handleCustomPrompt(haInfo: any): Promise<void> {
-    try {
-        // Add device metadata
-        const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
-        const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
-            acc[domain] = (devices as any[]).length;
-            return acc;
-        }, {}) : {};
-        const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
-
-        const userPrompt = await getUserInput("Enter your custom prompt: ");
-        if (!userPrompt) {
-            console.log("No prompt provided. Exiting...");
-            return;
-        }
-
-        const openai = getOpenAIClient();
-        const config = loadConfig();
-
-        const completion = await openai.chat.completions.create({
-            model: config.selectedModel.name,
-            messages: [
-                {
-                    role: "system",
-                    content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
-                    Current system has ${totalDevices} devices across ${deviceTypes.length} types: ${JSON.stringify(deviceStates)}`
-                },
-                { role: "user", content: userPrompt },
-            ],
-            max_tokens: config.selectedModel.maxTokens,
-            temperature: 0.3,
-        });
-
-        console.log("\nAnalysis Results:\n");
-        console.log(completion.choices[0].message?.content || "No response generated");
-    } catch (error) {
-        console.error("Error processing custom prompt:", error);
-        // Retry with simplified prompt if there's an error
-        try {
-            const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
-            const openai = getOpenAIClient();
-            const config = loadConfig();
-            const retryCompletion = await openai.chat.completions.create({
-                model: config.selectedModel.name,
-                messages: [
-                    {
-                        role: "system",
-                        content: "You are a Home Assistant expert. Provide a simple analysis of the system."
-                    },
-                    { role: "user", content: retryPrompt },
-                ],
-                max_tokens: config.selectedModel.maxTokens,
-                temperature: 0.3,
-            });
-
-            console.log("\nAnalysis Results:\n");
-            console.log(retryCompletion.choices[0].message?.content || "No response generated");
-        } catch (retryError) {
-            console.error("Error during retry:", retryError);
-        }
-    }
-}
-
-// Update automation handling
+interface AutomationConfig {
+    id?: string;
+    alias?: string;
+    description?: string;
+    trigger?: Array<{
+        platform: string;
+        [key: string]: any;
+    }>;
+    condition?: Array<{
+        condition: string;
+        [key: string]: any;
+    }>;
+    action?: Array<{
+        service?: string;
+        [key: string]: any;
+    }>;
+    mode?: string;
+}
+
 async function handleAutomationOptimization(haInfo: any): Promise<void> {
     try {
-        const result = await executeMcpTool('automation', { action: 'list' });
-        if (!result?.success) {
-            logger.error(`Failed to retrieve automations: ${result?.message || 'Unknown error'}`);
-            return;
-        }
-
-        const automations = result.automations || [];
+        const hassHost = process.env.HASS_HOST;
+
+        // Get automations directly from Home Assistant
+        const automationsResponse = await fetch(`${hassHost}/api/states`, {
+            headers: {
+                'Authorization': `Bearer ${hassToken}`,
+                'Content-Type': 'application/json'
+            }
+        });
+
+        if (!automationsResponse.ok) {
+            throw new Error(`Failed to fetch automations: ${automationsResponse.status}`);
+        }
+
+        const states = await automationsResponse.json() as HassState[];
+        const automations = states.filter(state => state.entity_id.startsWith('automation.'));
+
+        // Get services to understand what actions are available
+        const servicesResponse = await fetch(`${hassHost}/api/services`, {
+            headers: {
+                'Authorization': `Bearer ${hassToken}`,
+                'Content-Type': 'application/json'
+            }
+        });
+
+        let availableServices: Record<string, any> = {};
+        if (servicesResponse.ok) {
+            const services = await servicesResponse.json() as ServiceDomain[];
+            availableServices = services.reduce((acc: Record<string, any>, service: ServiceDomain) => {
+                if (service.domain && service.services) {
+                    acc[service.domain] = service.services;
+                }
+                return acc;
+            }, {});
+            logger.debug(`Retrieved services from ${Object.keys(availableServices).length} domains`);
+        }
+
+        // Enrich automation data with service information
+        const enrichedAutomations = automations.map(automation => {
+            const actions = automation.attributes?.action || [];
+            const enrichedActions = actions.map((action: any) => {
+                if (action.service) {
+                    const [domain, service] = action.service.split('.');
+                    const serviceInfo = availableServices[domain]?.[service];
+                    return {
+                        ...action,
+                        service_info: serviceInfo
+                    };
+                }
+                return action;
+            });
+
+            return {
+                ...automation,
+                config: {
+                    id: automation.entity_id.split('.')[1],
+                    alias: automation.attributes?.friendly_name,
+                    trigger: automation.attributes?.trigger || [],
+                    condition: automation.attributes?.condition || [],
+                    action: enrichedActions,
+                    mode: automation.attributes?.mode || 'single'
+                }
+            };
+        });
 
         if (automations.length === 0) {
             console.log(chalk.bold.underline("\nAutomation Optimization Report"));
             console.log(chalk.yellow("No automations found in the system. Consider creating some automations to improve your Home Assistant experience."));
@@ -679,7 +771,7 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
         }
 
         logger.info(`Analyzing ${automations.length} automations...`);
-        const optimizationXml = await analyzeAutomations(automations);
+        const optimizationXml = await analyzeAutomations(enrichedAutomations);
 
         const parser = new DOMParser();
         const xmlDoc = parser.parseFromString(optimizationXml, "text/xml");
@@ -721,51 +813,85 @@ async function handleAutomationOptimization(haInfo: any): Promise<void> {
     }
 }
 
-// Add new automation optimization function
 async function analyzeAutomations(automations: any[]): Promise<string> {
     const openai = getOpenAIClient();
     const config = loadConfig();
 
-    // Compress automation data by only including essential fields
-    const compressedAutomations = automations.map(automation => ({
-        id: automation.entity_id,
-        name: automation.attributes?.friendly_name || automation.entity_id,
-        state: automation.state,
-        last_triggered: automation.attributes?.last_triggered,
-        mode: automation.attributes?.mode,
-        trigger_count: automation.attributes?.trigger?.length || 0,
-        action_count: automation.attributes?.action?.length || 0
-    }));
+    // Create a more detailed summary of automations
+    const automationSummary = {
+        total: automations.length,
+        active: automations.filter(a => a.state === 'on').length,
+        by_type: automations.reduce((acc: Record<string, number>, auto) => {
+            const type = auto.attributes?.mode || 'single';
+            acc[type] = (acc[type] || 0) + 1;
+            return acc;
+        }, {}),
+        recently_triggered: automations.filter(a => {
+            const lastTriggered = a.attributes?.last_triggered;
+            if (!lastTriggered) return false;
+            const lastTriggerDate = new Date(lastTriggered);
+            const oneDayAgo = new Date();
+            oneDayAgo.setDate(oneDayAgo.getDate() - 1);
+            return lastTriggerDate > oneDayAgo;
+        }).length,
+        trigger_types: automations.reduce((acc: Record<string, number>, auto) => {
+            const triggers = auto.config?.trigger || [];
+            triggers.forEach((trigger: any) => {
+                const type = trigger.platform || 'unknown';
+                acc[type] = (acc[type] || 0) + 1;
+            });
+            return acc;
+        }, {}),
+        action_types: automations.reduce((acc: Record<string, number>, auto) => {
+            const actions = auto.config?.action || [];
+            actions.forEach((action: any) => {
+                const type = action.service?.split('.')[0] || 'unknown';
+                acc[type] = (acc[type] || 0) + 1;
+            });
+            return acc;
+        }, {}),
+        service_domains: Array.from(new Set(automations.flatMap(auto =>
+            (auto.config?.action || [])
+                .map((action: any) => action.service?.split('.')[0])
+                .filter(Boolean)
+        ))).sort(),
+        names: automations.map(a => a.attributes?.friendly_name || a.entity_id.split('.')[1]).slice(0, 10)
+    };
 
     const prompt = `Analyze these Home Assistant automations and provide optimization suggestions in XML format:
 
-${JSON.stringify(compressedAutomations, null, 2)}
+${JSON.stringify(automationSummary, null, 2)}
+
+Key metrics:
+- Total automations: ${automationSummary.total}
+- Active automations: ${automationSummary.active}
+- Recently triggered: ${automationSummary.recently_triggered}
+- Automation modes: ${JSON.stringify(automationSummary.by_type)}
+- Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
+- Action types: ${JSON.stringify(automationSummary.action_types)}
+- Service domains used: ${automationSummary.service_domains.join(', ')}
 
 Generate your response in this EXACT format:
 <analysis>
     <findings>
         <item>Finding 1</item>
         <item>Finding 2</item>
+        <!-- Add more findings as needed -->
     </findings>
     <recommendations>
         <item>Recommendation 1</item>
         <item>Recommendation 2</item>
+        <!-- Add more recommendations as needed -->
     </recommendations>
     <blueprints>
         <item>Blueprint suggestion 1</item>
         <item>Blueprint suggestion 2</item>
+        <!-- Add more blueprint suggestions as needed -->
     </blueprints>
 </analysis>
 
+If no optimizations are needed, return empty item lists but maintain the XML structure.
+
 Focus on:
-1. Identifying patterns and potential improvements
-2. Suggesting energy-saving optimizations
+1. Identifying patterns and potential improvements based on trigger and action types
+2. Suggesting energy-saving optimizations based on the services being used
 3. Recommending error handling improvements
-4. Suggesting relevant blueprints`;
+4. Suggesting relevant blueprints for common automation patterns
+5. Analyzing the distribution of automation types and suggesting optimizations`;
 
     try {
         const completion = await openai.chat.completions.create({
@@ -773,12 +899,12 @@ Focus on:
             messages: [
                 {
                     role: "system",
-                    content: "You are a Home Assistant automation expert. Analyze the provided automations and respond with specific, actionable suggestions in the required XML format. If no optimizations are needed, return empty item lists but maintain the XML structure."
+                    content: "You are a Home Assistant automation expert. Analyze the provided automation summary and respond with specific, actionable suggestions in the required XML format."
                },
                { role: "user", content: prompt }
            ],
            temperature: 0.2,
-            max_tokens: Math.min(config.selectedModel.maxTokens, 4000)
+            max_tokens: Math.min(config.selectedModel.maxTokens, 2048)
        });
 
        const response = completion.choices[0].message?.content || "";
@@ -819,62 +945,164 @@ Focus on:
     }
 }
 
-// Update model selection prompt count dynamically
-async function selectModel(): Promise<ModelConfig> {
-    console.log(chalk.bold.underline("\nAvailable Models:"));
-    AVAILABLE_MODELS.forEach((model, index) => {
-        console.log(
-            `${index + 1}. ${chalk.blue(model.name.padEnd(20))} ` +
-            `Context: ${chalk.yellow(model.contextWindow.toLocaleString().padStart(6))} tokens | ` +
-            `Max output: ${chalk.green(model.maxTokens.toLocaleString().padStart(5))} tokens`
-        );
-    });
-
-    const maxOption = AVAILABLE_MODELS.length;
-    const choice = await getUserInput(`\nSelect model (1-${maxOption}): `);
-    const selectedIndex = parseInt(choice) - 1;
-
-    if (isNaN(selectedIndex) || selectedIndex < 0 || selectedIndex >= AVAILABLE_MODELS.length) {
-        console.log(chalk.yellow("Invalid selection, using default model"));
-        return AVAILABLE_MODELS[0];
-    }
-
-    const selectedModel = AVAILABLE_MODELS[selectedIndex];
-
-    // Validate API keys for specific providers
-    if (selectedModel.name.startsWith('deepseek')) {
-        if (!process.env.DEEPSEEK_API_KEY) {
-            logger.error("DeepSeek models require DEEPSEEK_API_KEY in .env");
-            process.exit(1);
-        }
-        // Verify DeepSeek connection
-        try {
-            await getOpenAIClient().models.list();
-        } catch (error) {
-            logger.error(`DeepSeek connection failed: ${error.message}`);
-            process.exit(1);
-        }
-    }
-
-    if (selectedModel.name.startsWith('gpt-4-o') && !process.env.OPENAI_API_KEY) {
-        logger.error("OpenAI models require OPENAI_API_KEY in .env");
-        process.exit(1);
-    }
-
-    return selectedModel;
-}
-
+// Add new handleCustomPrompt function
+async function handleCustomPrompt(haInfo: any, customPrompt: string): Promise<void> {
+    try {
+        // Add device metadata
+        const deviceTypes = haInfo.devices ? Object.keys(haInfo.devices) : [];
+        const deviceStates = haInfo.devices ? Object.entries(haInfo.devices).reduce((acc: Record<string, number>, [domain, devices]) => {
+            acc[domain] = (devices as any[]).length;
+            return acc;
+        }, {}) : {};
+        const totalDevices = deviceTypes.reduce((sum, type) => sum + deviceStates[type], 0);
+
+        // Get automation information
+        const automations = haInfo.devices?.automation || [];
+        const automationDetails = automations.map((auto: any) => ({
+            name: auto.attributes?.friendly_name || auto.entity_id.split('.')[1],
+            state: auto.state,
+            last_triggered: auto.attributes?.last_triggered,
+            mode: auto.attributes?.mode,
+            triggers: auto.attributes?.trigger?.map((t: any) => ({
+                platform: t.platform,
+                ...t
+            })) || [],
+            conditions: auto.attributes?.condition?.map((c: any) => ({
+                condition: c.condition,
+                ...c
+            })) || [],
+            actions: auto.attributes?.action?.map((a: any) => ({
+                service: a.service,
+                ...a
+            })) || []
+        }));
+
+        const automationSummary = {
+            total: automations.length,
+            active: automations.filter((a: any) => a.state === 'on').length,
+            trigger_types: automations.reduce((acc: Record<string, number>, auto: any) => {
+                const triggers = auto.attributes?.trigger || [];
+                triggers.forEach((trigger: any) => {
+                    const type = trigger.platform || 'unknown';
+                    acc[type] = (acc[type] || 0) + 1;
+                });
+                return acc;
+            }, {}),
+            action_types: automations.reduce((acc: Record<string, number>, auto: any) => {
+                const actions = auto.attributes?.action || [];
+                actions.forEach((action: any) => {
+                    const type = action.service?.split('.')[0] || 'unknown';
+                    acc[type] = (acc[type] || 0) + 1;
+                });
+                return acc;
+            }, {}),
+            service_domains: Array.from(new Set(automations.flatMap((auto: any) =>
+                (auto.attributes?.action || [])
+                    .map((action: any) => action.service?.split('.')[0])
+                    .filter(Boolean)
+            ))).sort()
+        };
+
+        // Create a summary of the devices
+        const deviceSummary = Object.entries(deviceStates)
+            .map(([domain, count]) => `${domain}: ${count}`)
+            .join(', ');
+
+        if (process.env.HA_TEST_MODE === '1') {
+            console.log("\nTest Mode Analysis Results:\n");
+            console.log("Based on your Home Assistant setup with:");
+            console.log(`- ${totalDevices} total devices`);
+            console.log(`- Device types: ${deviceTypes.join(', ')}`);
+            console.log("\nAnalysis for prompt: " + customPrompt);
+            console.log("1. Current State:");
+            console.log("   - All devices are functioning normally");
+            console.log("   - System is responsive and stable");
+            console.log("\n2. Recommendations:");
+            console.log("   - Consider grouping devices by room");
+            console.log("   - Add automation for frequently used devices");
+            console.log("   - Monitor power usage of main appliances");
+            console.log("\n3. Optimization Opportunities:");
+            console.log("   - Create scenes for different times of day");
+            console.log("   - Set up presence detection for automatic control");
+            return;
+        }
+
+        const openai = getOpenAIClient();
+        const config = loadConfig();
+
+        const completion = await openai.chat.completions.create({
+            model: config.selectedModel.name,
+            messages: [
+                {
+                    role: "system",
+                    content: `You are a Home Assistant expert. Analyze the following Home Assistant information and respond to the user's prompt.
+                    Current system has ${totalDevices} devices across ${deviceTypes.length} types.
+                    Device distribution: ${deviceSummary}
+
+                    Automation Summary:
+                    - Total automations: ${automationSummary.total}
+                    - Active automations: ${automationSummary.active}
+                    - Trigger types: ${JSON.stringify(automationSummary.trigger_types)}
+                    - Action types: ${JSON.stringify(automationSummary.action_types)}
+                    - Service domains used: ${automationSummary.service_domains.join(', ')}
+
+                    Detailed Automation List:
+                    ${JSON.stringify(automationDetails, null, 2)}`
+                },
+                { role: "user", content: customPrompt },
+            ],
+            max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
+            temperature: 0.3,
+        });
+
+        console.log("\nAnalysis Results:\n");
+        console.log(completion.choices[0].message?.content || "No response generated");
+    } catch (error) {
+        console.error("Error processing custom prompt:", error);
+
+        if (process.env.HA_TEST_MODE === '1') {
+            console.log("\nTest Mode Fallback Analysis:\n");
+            console.log("1. System Overview:");
+            console.log("   - Basic configuration detected");
+            console.log("   - All core services operational");
+            console.log("\n2. Suggestions:");
+            console.log("   - Review device naming conventions");
+            console.log("   - Consider adding automation blueprints");
+            return;
+        }
+
+        // Retry with simplified prompt if there's an error
+        try {
+            const retryPrompt = "Please provide a simpler analysis of the Home Assistant system.";
+            const openai = getOpenAIClient();
+            const config = loadConfig();
+            const retryCompletion = await openai.chat.completions.create({
+                model: config.selectedModel.name,
+                messages: [
+                    {
+                        role: "system",
+                        content: "You are a Home Assistant expert. Provide a simple analysis of the system."
+                    },
+                    { role: "user", content: retryPrompt },
+                ],
+                max_tokens: Math.min(config.selectedModel.maxTokens, 2048), // Limit token usage
+                temperature: 0.3,
+            });
+
+            console.log("\nAnalysis Results:\n");
+            console.log(retryCompletion.choices[0].message?.content || "No response generated");
+        } catch (retryError) {
+            console.error("Error during retry:", retryError);
+        }
+    }
+}
 
 // Enhanced main function with progress indicators
 async function main() {
     let config = loadConfig();
 
-    // Model selection
-    config.selectedModel = await selectModel();
-    logger.info(`Selected model: ${chalk.blue(config.selectedModel.name)} ` +
-        `(Context: ${config.selectedModel.contextWindow.toLocaleString()} tokens, ` +
-        `Output: ${config.selectedModel.maxTokens.toLocaleString()} tokens)`);
-
     logger.info(`Starting analysis with ${config.selectedModel.name} model...`);
 
     try {
@@ -888,12 +1116,20 @@ async function main() {
             logger.success(`Collected data from ${Object.keys(haInfo.devices).length} device types`);
 
-        const mode = await getUserInput(
-            "\nSelect mode:\n1. Standard Analysis\n2. Custom Prompt\n3. Automation Optimization\nEnter choice (1-3): "
-        );
+        // Get mode from command line argument or default to 1
+        const mode = process.argv[2] || "1";
+
+        console.log("\nAvailable modes:");
+        console.log("1. Standard Analysis");
+        console.log("2. Custom Prompt");
+        console.log("3. Automation Optimization");
+        console.log(`Selected mode: ${mode}\n`);
 
         if (mode === "2") {
-            await handleCustomPrompt(haInfo);
+            // For custom prompt mode, get the prompt from remaining arguments
+            const customPrompt = process.argv.slice(3).join(" ") || "Analyze my Home Assistant setup";
+            console.log(`Custom prompt: ${customPrompt}\n`);
+            await handleCustomPrompt(haInfo, customPrompt);
         } else if (mode === "3") {
             await handleAutomationOptimization(haInfo);
         } else {
@@ -938,22 +1174,39 @@ function getItems(xmlDoc: Document, path: string): string[] {
         .map(item => (item as Element).textContent || "");
 }
 
-// Add environment check for processor type
+// Replace the Express server initialization at the bottom with Bun's server
 if (process.env.PROCESSOR_TYPE === 'openai') {
-    // Initialize Express server only for OpenAI
-    const app = express();
-    const port = process.env.PORT || 3000;
-
-    app.use(bodyParser.json());
-
-    // Keep existing OpenAI routes
-    app.post('/chat', async (req, res) => {
-        // ... existing OpenAI handler code ...
-    });
-
-    app.listen(port, () => {
-        console.log(`[OpenAI Server] Running on port ${port}`);
-    });
+    // Initialize Bun server for OpenAI
+    const server = Bun.serve({
+        port: process.env.PORT || 3000,
+        async fetch(req) {
+            const url = new URL(req.url);
+
+            // Handle chat endpoint
+            if (url.pathname === '/chat' && req.method === 'POST') {
+                try {
+                    const body = await req.json();
+                    // Handle chat logic here
+                    return new Response(JSON.stringify({ success: true }), {
+                        headers: { 'Content-Type': 'application/json' }
+                    });
+                } catch (error) {
+                    return new Response(JSON.stringify({
+                        success: false,
+                        error: error.message
+                    }), {
+                        status: 400,
+                        headers: { 'Content-Type': 'application/json' }
+                    });
+                }
+            }
+
+            // Handle 404 for unknown routes
+            return new Response('Not Found', { status: 404 });
+        },
+    });
+
+    console.log(`[OpenAI Server] Running on port ${server.port}`);
 } else {
     console.log('[Claude Mode] Using stdio communication');
 }
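With the interactive prompts gone, the analyzer now takes its mode as the first CLI argument and, for mode 2, the prompt from the remaining arguments, e.g. "bun run <analyzer-script> 2 "Which automations never trigger?"", or "HA_TEST_MODE=1 bun run <analyzer-script> 1" to exercise the mock-data path (the script path is elided here because this view does not show the file name).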

View File

@@ -21,7 +21,7 @@
     "profile": "bun --inspect src/index.ts",
     "clean": "rm -rf dist .bun coverage",
     "typecheck": "bun x tsc --noEmit",
-    "example:speech": "bun run examples/speech-to-text-example.ts"
+    "example:speech": "bun run extra/speech-to-text-example.ts"
   },
   "dependencies": {
     "@elysiajs/cors": "^1.2.0",
@@ -31,13 +31,14 @@
     "@types/sanitize-html": "^2.9.5",
     "@types/ws": "^8.5.10",
     "@xmldom/xmldom": "^0.9.7",
-    "dotenv": "^16.4.5",
+    "chalk": "^5.4.1",
+    "dotenv": "^16.4.7",
     "elysia": "^1.2.11",
     "helmet": "^7.1.0",
     "jsonwebtoken": "^9.0.2",
     "node-fetch": "^3.3.2",
     "node-record-lpcm16": "^1.0.1",
-    "openai": "^4.82.0",
+    "openai": "^4.83.0",
     "sanitize-html": "^2.11.0",
     "typescript": "^5.3.3",
     "winston": "^3.11.0",

View File

@@ -1,5 +1,5 @@
 import { config as dotenvConfig } from "dotenv";
-import fs from "fs";
+import { file } from "bun";
 import path from "path";
 
 /**
@@ -15,7 +15,7 @@ const ENV_FILE_MAPPING: Record<string, string> = {
  * Loads environment variables from the appropriate files based on NODE_ENV.
  * First loads environment-specific file, then overrides with generic .env if it exists.
  */
-export function loadEnvironmentVariables() {
+export async function loadEnvironmentVariables() {
     // Determine the current environment (default to 'development')
     const nodeEnv = (process.env.NODE_ENV || "development").toLowerCase();
@@ -29,20 +29,30 @@ export async function loadEnvironmentVariables() {
     const envPath = path.resolve(process.cwd(), envFile);
 
     // Load the environment-specific file if it exists
-    if (fs.existsSync(envPath)) {
-        dotenvConfig({ path: envPath });
-        console.log(`Loaded environment variables from ${envFile}`);
-    } else {
-        console.warn(`Environment-specific file ${envFile} not found.`);
-    }
+    try {
+        const envFileExists = await file(envPath).exists();
+        if (envFileExists) {
+            dotenvConfig({ path: envPath });
+            console.log(`Loaded environment variables from ${envFile}`);
+        } else {
+            console.warn(`Environment-specific file ${envFile} not found.`);
+        }
+    } catch (error) {
+        console.warn(`Error checking environment file ${envFile}:`, error);
+    }
 
     // Finally, check if there is a generic .env file present
     // If so, load it with the override option, so its values take precedence
     const genericEnvPath = path.resolve(process.cwd(), ".env");
-    if (fs.existsSync(genericEnvPath)) {
-        dotenvConfig({ path: genericEnvPath, override: true });
-        console.log("Loaded and overrode with generic .env file");
-    }
+    try {
+        const genericEnvExists = await file(genericEnvPath).exists();
+        if (genericEnvExists) {
+            dotenvConfig({ path: genericEnvPath, override: true });
+            console.log("Loaded and overrode with generic .env file");
+        }
+    } catch (error) {
+        console.warn(`Error checking generic .env file:`, error);
+    }
 }
 
 // Export the environment file mapping for reference
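Because the loader now awaits Bun's file(...).exists(), it is async and every call site must await it, as the src/index.ts change below does. A minimal sketch of the new calling convention:

// Sketch: loadEnvironmentVariables() now returns a Promise; Bun supports top-level await.
import { loadEnvironmentVariables } from "./config/loadEnv.js";

await loadEnvironmentVariables();
console.log(`NODE_ENV=${process.env.NODE_ENV}`);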

View File

@@ -1,6 +1,4 @@
-import "./polyfills.js";
-import { config } from "dotenv";
-import { resolve } from "path";
+import { file } from "bun";
 import { Elysia } from "elysia";
 import { cors } from "@elysiajs/cors";
 import { swagger } from "@elysiajs/swagger";
@@ -27,17 +25,11 @@ import {
 } from "./commands.js";
 import { speechService } from "./speech/index.js";
 import { APP_CONFIG } from "./config/app.config.js";
+import { loadEnvironmentVariables } from "./config/loadEnv.js";
+import { MCP_SCHEMA } from "./mcp/schema.js";
 
 // Load environment variables based on NODE_ENV
-const envFile =
-    process.env.NODE_ENV === "production"
-        ? ".env"
-        : process.env.NODE_ENV === "test"
-            ? ".env.test"
-            : ".env.development";
-console.log(`Loading environment from ${envFile}`);
-config({ path: resolve(process.cwd(), envFile) });
+await loadEnvironmentVariables();
 
 // Configuration
 const HASS_TOKEN = process.env.HASS_TOKEN;
@@ -126,6 +118,20 @@ const app = new Elysia()
     .use(sanitizeInput)
     .use(errorHandler);
 
+// Mount API routes
+app.get("/api/mcp", () => MCP_SCHEMA);
+
+app.post("/api/mcp/execute", async ({ body }: { body: { tool: string; parameters: Record<string, unknown> } }) => {
+    const { tool: toolName, parameters } = body;
+    const tool = tools.find((t) => t.name === toolName);
+    if (!tool) {
+        return {
+            success: false,
+            message: `Tool '${toolName}' not found`,
+        };
+    }
+    return await tool.execute(parameters);
+});
+
 // Health check endpoint
 app.get("/health", () => ({
     status: "ok",

View File

@@ -21,20 +21,72 @@ export const listDevicesTool: Tool = {
         }
 
         const states = (await response.json()) as HassState[];
-        const devices: Record<string, HassState[]> = {
-            light: states.filter(state => state.entity_id.startsWith('light.')),
-            climate: states.filter(state => state.entity_id.startsWith('climate.'))
-        };
+        const devices: Record<string, HassState[]> = {};
+
+        // Group devices by domain
+        states.forEach(state => {
+            const [domain] = state.entity_id.split('.');
+            if (!devices[domain]) {
+                devices[domain] = [];
+            }
+            devices[domain].push(state);
+        });
+
+        // Calculate device statistics
+        const deviceStats = Object.entries(devices).map(([domain, entities]) => {
+            const activeStates = ['on', 'home', 'unlocked', 'open'];
+            const active = entities.filter(e => activeStates.includes(e.state)).length;
+            const uniqueStates = [...new Set(entities.map(e => e.state))];
+
+            return {
+                domain,
+                count: entities.length,
+                active,
+                inactive: entities.length - active,
+                states: uniqueStates,
+                sample: entities.slice(0, 2).map(e => ({
+                    id: e.entity_id,
+                    state: e.state,
+                    name: e.attributes?.friendly_name || e.entity_id
+                }))
+            };
+        });
+
+        const totalDevices = states.length;
+        const deviceTypes = Object.keys(devices);
+
+        const deviceSummary = {
+            total_devices: totalDevices,
+            device_types: deviceTypes,
+            by_domain: Object.fromEntries(
+                deviceStats.map(stat => [
+                    stat.domain,
+                    {
+                        count: stat.count,
+                        active: stat.active,
+                        states: stat.states,
+                        sample: stat.sample
+                    }
+                ])
+            )
+        };
 
         return {
             success: true,
             devices,
+            device_summary: deviceSummary
         };
     } catch (error) {
+        console.error('Error in list devices tool:', error);
         return {
             success: false,
-            message:
-                error instanceof Error ? error.message : "Unknown error occurred",
+            message: error instanceof Error ? error.message : "Unknown error occurred",
+            devices: {},
+            device_summary: {
+                total_devices: 0,
+                device_types: [],
+                by_domain: {}
+            }
         };
     }
 },
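Written out as an illustrative TypeScript type (derived from the code above; no such type exists in the repo), the enriched result looks like this:

interface ListDevicesResult {
    success: boolean;
    message?: string; // present on failure
    devices: Record<string, HassState[]>;
    device_summary: {
        total_devices: number;
        device_types: string[];
        by_domain: Record<string, {
            count: number;
            active: number;
            states: string[];
            sample: { id: string; state: string; name: string }[];
        }>;
    };
}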