Add AI NLP and router modules for advanced natural language processing
- Introduced comprehensive NLP processing modules for intent classification, entity extraction, and context analysis
- Created AI router with rate-limited endpoints for command interpretation and execution
- Added prompt templates for different AI models with configurable system and user prompts
- Implemented robust type definitions for AI-related interfaces and schemas
- Enhanced security and error handling in AI processing pipeline
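Usage sketch (not part of this commit; the mount path, port, and entry-point location are assumptions): the router added below is a standard Express router, so wiring it into an app would look roughly like this:

import express from 'express';
import aiRouter from './ai/endpoints/ai-router.js'; // path relative to src/, assumed

const app = express();
app.use(express.json());       // needed so req.body is populated for the AI endpoints
app.use('/api/ai', aiRouter);  // mount path is an assumption
app.listen(3000);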
src/ai/endpoints/ai-router.ts (new file, 207 lines)
@@ -0,0 +1,207 @@
import express from 'express';
import { z } from 'zod';
import { NLPProcessor } from '../nlp/processor.js';
import { AIRateLimit, AIContext, AIResponse, AIError, AIModel } from '../types/index.js';
import rateLimit from 'express-rate-limit';

const router = express.Router();
const nlpProcessor = new NLPProcessor();

// Rate limiting configuration
const rateLimitConfig: AIRateLimit = {
  requests_per_minute: 100,
  requests_per_hour: 1000,
  concurrent_requests: 10,
  model_specific_limits: {
    claude: {
      requests_per_minute: 100,
      requests_per_hour: 1000
    },
    gpt4: {
      requests_per_minute: 50,
      requests_per_hour: 500
    },
    custom: {
      requests_per_minute: 200,
      requests_per_hour: 2000
    }
  }
};

// Request validation schemas
const interpretRequestSchema = z.object({
  input: z.string(),
  context: z.object({
    user_id: z.string(),
    session_id: z.string(),
    timestamp: z.string(),
    location: z.string(),
    previous_actions: z.array(z.any()),
    environment_state: z.record(z.any())
  }),
  model: z.enum(['claude', 'gpt4', 'custom']).optional()
});

// Rate limiters
const globalLimiter = rateLimit({
  windowMs: 60 * 1000, // 1 minute
  max: rateLimitConfig.requests_per_minute
});

const modelSpecificLimiter = (model: string) => rateLimit({
  windowMs: 60 * 1000,
  max: rateLimitConfig.model_specific_limits[model as AIModel]?.requests_per_minute ||
    rateLimitConfig.requests_per_minute
});

// Error handler middleware
const errorHandler = (
  error: Error,
  req: express.Request,
  res: express.Response,
  next: express.NextFunction
) => {
  const aiError: AIError = {
    code: 'PROCESSING_ERROR',
    message: error.message,
    suggestion: 'Please try again with a different command format',
    recovery_options: [
      'Simplify your command',
      'Use standard command patterns',
      'Check device names and parameters'
    ],
    context: req.body.context
  };

  res.status(500).json({ error: aiError });
};

// Endpoints
router.post(
  '/interpret',
  globalLimiter,
  async (req: express.Request, res: express.Response, next: express.NextFunction) => {
    try {
      const { input, context, model = 'claude' } = interpretRequestSchema.parse(req.body);

      // Apply model-specific rate limiting
      modelSpecificLimiter(model)(req, res, async () => {
        const { intent, confidence, error } = await nlpProcessor.processCommand(input, context);

        if (error) {
          return res.status(400).json({ error });
        }

        const isValid = await nlpProcessor.validateIntent(intent, confidence);

        if (!isValid) {
          const suggestions = await nlpProcessor.suggestCorrections(input, {
            code: 'INVALID_INTENT',
            message: 'Could not understand the command with high confidence',
            suggestion: 'Please try rephrasing your command',
            recovery_options: [],
            context
          });

          return res.status(400).json({
            error: {
              code: 'INVALID_INTENT',
              message: 'Could not understand the command with high confidence',
              suggestion: 'Please try rephrasing your command',
              recovery_options: suggestions,
              context
            }
          });
        }

        const response: AIResponse = {
          natural_language: `I'll ${intent.action} the ${intent.target.split('.').pop()}`,
          structured_data: {
            success: true,
            action_taken: intent.action,
            entities_affected: [intent.target],
            state_changes: intent.parameters
          },
          next_suggestions: [
            'Would you like to adjust any settings?',
            'Should I perform this action in other rooms?',
            'Would you like to schedule this action?'
          ],
          confidence,
          context
        };

        res.json(response);
      });
    } catch (error) {
      next(error);
    }
  }
);

router.post(
  '/execute',
  globalLimiter,
  async (req: express.Request, res: express.Response, next: express.NextFunction) => {
    try {
      const { intent, context, model = 'claude' } = req.body;

      // Apply model-specific rate limiting
      modelSpecificLimiter(model)(req, res, async () => {
        // Execute the intent through Home Assistant
        // This would integrate with your existing Home Assistant service

        const response: AIResponse = {
          natural_language: `Successfully executed ${intent.action} on ${intent.target}`,
          structured_data: {
            success: true,
            action_taken: intent.action,
            entities_affected: [intent.target],
            state_changes: intent.parameters
          },
          next_suggestions: [
            'Would you like to verify the state?',
            'Should I perform any related actions?',
            'Would you like to undo this action?'
          ],
          confidence: { overall: 1, intent: 1, entities: 1, context: 1 },
          context
        };

        res.json(response);
      });
    } catch (error) {
      next(error);
    }
  }
);

router.get(
  '/suggestions',
  globalLimiter,
  async (req: express.Request, res: express.Response, next: express.NextFunction) => {
    try {
      const { context, model = 'claude' } = req.body;

      // Apply model-specific rate limiting
      modelSpecificLimiter(model)(req, res, async () => {
        // Generate context-aware suggestions
        const suggestions = [
          'Turn on the lights in the living room',
          'Set the temperature to 72 degrees',
          'Show me the current state of all devices',
          'Start the evening routine'
        ];

        res.json({ suggestions });
      });
    } catch (error) {
      next(error);
    }
  }
);

// Apply error handler
router.use(errorHandler);

export default router;
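Usage sketch (not part of this commit; the base URL and mount path are assumptions): a request body that satisfies interpretRequestSchema looks like this:

const res = await fetch('http://localhost:3000/api/ai/interpret', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    input: 'Turn on the living room light',
    context: {
      user_id: 'user-1',
      session_id: 'session-1',
      timestamp: new Date().toISOString(),
      location: 'living room',
      previous_actions: [],
      environment_state: {}
    },
    model: 'claude'
  })
});
const data = await res.json(); // AIResponse on success, { error: AIError } otherwise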
src/ai/nlp/context-analyzer.ts (new file, 135 lines)
@@ -0,0 +1,135 @@
import { AIContext, AIIntent } from '../types/index.js';

interface ContextAnalysis {
  confidence: number;
  relevant_params: Record<string, any>;
}

interface ContextRule {
  condition: (context: AIContext, intent: AIIntent) => boolean;
  relevance: number;
  params?: (context: AIContext) => Record<string, any>;
}

export class ContextAnalyzer {
  private contextRules: ContextRule[];

  constructor() {
    this.contextRules = [
      // Location-based context
      {
        condition: (context, intent) =>
          Boolean(context.location && intent.target.includes(context.location.toLowerCase())),
        relevance: 0.8,
        params: (context) => ({ location: context.location })
      },

      // Time-based context
      {
        condition: (context) => {
          const hour = new Date(context.timestamp).getHours();
          return hour >= 0 && hour <= 23;
        },
        relevance: 0.6,
        params: (context) => ({
          time_of_day: this.getTimeOfDay(new Date(context.timestamp))
        })
      },

      // Previous action context
      {
        condition: (context, intent) => {
          const recentActions = context.previous_actions.slice(-3);
          return recentActions.some(action =>
            action.target === intent.target ||
            action.action === intent.action
          );
        },
        relevance: 0.7,
        params: (context) => ({
          recent_action: context.previous_actions[context.previous_actions.length - 1]
        })
      },

      // Environment state context
      {
        condition: (context, intent) => {
          return Object.keys(context.environment_state).some(key =>
            intent.target.includes(key) ||
            intent.parameters[key] !== undefined
          );
        },
        relevance: 0.9,
        params: (context) => ({ environment: context.environment_state })
      }
    ];
  }

  async analyze(intent: AIIntent, context: AIContext): Promise<ContextAnalysis> {
    let totalConfidence = 0;
    let relevantParams: Record<string, any> = {};
    let applicableRules = 0;

    for (const rule of this.contextRules) {
      if (rule.condition(context, intent)) {
        totalConfidence += rule.relevance;
        applicableRules++;

        if (rule.params) {
          relevantParams = {
            ...relevantParams,
            ...rule.params(context)
          };
        }
      }
    }

    // Calculate normalized confidence
    const confidence = applicableRules > 0
      ? totalConfidence / applicableRules
      : 0.5; // Default confidence if no rules apply

    return {
      confidence,
      relevant_params: relevantParams
    };
  }

  private getTimeOfDay(date: Date): string {
    const hour = date.getHours();

    if (hour >= 5 && hour < 12) return 'morning';
    if (hour >= 12 && hour < 17) return 'afternoon';
    if (hour >= 17 && hour < 22) return 'evening';
    return 'night';
  }

  async updateContextRules(newRules: ContextRule[]): Promise<void> {
    this.contextRules = [...this.contextRules, ...newRules];
  }

  async validateContext(context: AIContext): Promise<boolean> {
    // Validate required context fields
    if (!context.timestamp || !context.user_id || !context.session_id) {
      return false;
    }

    // Validate timestamp format
    const timestamp = new Date(context.timestamp);
    if (isNaN(timestamp.getTime())) {
      return false;
    }

    // Validate previous actions array
    if (!Array.isArray(context.previous_actions)) {
      return false;
    }

    // Validate environment state
    if (typeof context.environment_state !== 'object' || context.environment_state === null) {
      return false;
    }

    return true;
  }
}
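Usage sketch (not part of this commit; the rule below is a hypothetical example): updateContextRules appends caller-supplied rules to the built-in set, so domain-specific heuristics can be layered on without editing the class:

const analyzer = new ContextAnalyzer();

// Hypothetical rule: lighting commands issued in the evening get extra relevance
await analyzer.updateContextRules([
  {
    condition: (context, intent) =>
      intent.target.startsWith('light.') &&
      new Date(context.timestamp).getHours() >= 18,
    relevance: 0.75,
    params: () => ({ evening_lighting: true })
  }
]);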
src/ai/nlp/entity-extractor.ts (new file, 103 lines)
@@ -0,0 +1,103 @@
import { AIContext } from '../types/index.js';

interface ExtractedEntities {
  primary_target: string;
  parameters: Record<string, any>;
  confidence: number;
}

export class EntityExtractor {
  private deviceNameMap: Map<string, string>;
  private parameterPatterns: Map<string, RegExp>;

  constructor() {
    this.deviceNameMap = new Map();
    this.parameterPatterns = new Map();
    this.initializePatterns();
  }

  private initializePatterns(): void {
    // Device name variations
    this.deviceNameMap.set('living room light', 'light.living_room');
    this.deviceNameMap.set('kitchen light', 'light.kitchen');
    this.deviceNameMap.set('bedroom light', 'light.bedroom');

    // Parameter patterns (non-value parts are non-capturing so the numeric
    // value always lands in a capture group)
    this.parameterPatterns.set('brightness', /(\d+)\s*(?:%|percent)|bright(?:ness)?\s+(\d+)/i);
    this.parameterPatterns.set('temperature', /(\d+)\s*(?:degrees?|°)[CF]?/i);
    this.parameterPatterns.set('color', /(red|green|blue|white|warm|cool)/i);
  }

  async extract(input: string): Promise<ExtractedEntities> {
    const entities: ExtractedEntities = {
      primary_target: '',
      parameters: {},
      confidence: 0
    };

    try {
      // Find device name
      for (const [key, value] of this.deviceNameMap) {
        if (input.toLowerCase().includes(key)) {
          entities.primary_target = value;
          break;
        }
      }

      // Extract parameters
      for (const [param, pattern] of this.parameterPatterns) {
        const match = input.match(pattern);
        if (match) {
          // The value may sit in either capture group depending on which
          // alternative of the pattern matched
          entities.parameters[param] = this.normalizeParameterValue(param, match[1] ?? match[2]);
        }
      }

      // Calculate confidence based on matches
      entities.confidence = this.calculateConfidence(entities, input);

      return entities;
    } catch (error) {
      console.error('Entity extraction error:', error);
      return {
        primary_target: '',
        parameters: {},
        confidence: 0
      };
    }
  }

  private normalizeParameterValue(parameter: string, value: string): number | string {
    switch (parameter) {
      case 'brightness':
        return Math.min(100, Math.max(0, parseInt(value)));
      case 'temperature':
        return parseInt(value);
      case 'color':
        return value.toLowerCase();
      default:
        return value;
    }
  }

  private calculateConfidence(entities: ExtractedEntities, input: string): number {
    let confidence = 0;

    // Device confidence
    if (entities.primary_target) {
      confidence += 0.5;
    }

    // Parameter confidence
    const paramCount = Object.keys(entities.parameters).length;
    confidence += paramCount * 0.25;

    // Normalize confidence to 0-1 range
    return Math.min(1, confidence);
  }

  async updateDeviceMap(devices: Record<string, string>): Promise<void> {
    for (const [key, value] of Object.entries(devices)) {
      this.deviceNameMap.set(key, value);
    }
  }
}
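Usage sketch (not part of this commit; the extra device alias and entity ID are assumptions): updateDeviceMap lets callers register aliases beyond the built-in three before extraction:

const extractor = new EntityExtractor();

// Register an additional device alias (entity ID is an assumption)
await extractor.updateDeviceMap({ 'office light': 'light.office' });

const entities = await extractor.extract('Set the office light to 60%');
// → { primary_target: 'light.office', parameters: { brightness: 60 }, confidence: 0.75 }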
src/ai/nlp/intent-classifier.ts (new file, 177 lines)
@@ -0,0 +1,177 @@
interface ClassifiedIntent {
  action: string;
  target: string;
  confidence: number;
  parameters: Record<string, any>;
  raw_input: string;
}

interface ActionPattern {
  action: string;
  patterns: RegExp[];
  parameters?: string[];
}

export class IntentClassifier {
  private actionPatterns: ActionPattern[];

  constructor() {
    this.actionPatterns = [
      {
        action: 'turn_on',
        patterns: [
          /turn\s+on/i,
          /switch\s+on/i,
          /enable/i,
          /activate/i
        ]
      },
      {
        action: 'turn_off',
        patterns: [
          /turn\s+off/i,
          /switch\s+off/i,
          /disable/i,
          /deactivate/i
        ]
      },
      {
        action: 'set',
        patterns: [
          /set\s+(?:the\s+)?(.+)\s+to/i,
          /change\s+(?:the\s+)?(.+)\s+to/i,
          /adjust\s+(?:the\s+)?(.+)\s+to/i
        ],
        parameters: ['brightness', 'temperature', 'color']
      },
      {
        action: 'query',
        patterns: [
          /what\s+is/i,
          /get\s+(?:the\s+)?(.+)/i,
          /show\s+(?:the\s+)?(.+)/i,
          /tell\s+me/i
        ]
      }
    ];
  }

  async classify(
    input: string,
    extractedEntities: { parameters: Record<string, any>; primary_target: string }
  ): Promise<ClassifiedIntent> {
    let bestMatch: ClassifiedIntent = {
      action: '',
      target: '',
      confidence: 0,
      parameters: {},
      raw_input: input
    };

    for (const actionPattern of this.actionPatterns) {
      for (const pattern of actionPattern.patterns) {
        const match = input.match(pattern);
        if (match) {
          const confidence = this.calculateConfidence(match[0], input);
          if (confidence > bestMatch.confidence) {
            bestMatch = {
              action: actionPattern.action,
              target: extractedEntities.primary_target,
              confidence,
              parameters: this.extractActionParameters(actionPattern, match, extractedEntities),
              raw_input: input
            };
          }
        }
      }
    }

    // If no match found, try to infer from context
    if (!bestMatch.action) {
      bestMatch = this.inferFromContext(input, extractedEntities);
    }

    return bestMatch;
  }

  private calculateConfidence(match: string, input: string): number {
    // Base confidence from match length relative to input length
    const lengthRatio = match.length / input.length;
    let confidence = lengthRatio * 0.7;

    // Boost confidence for exact matches
    if (match.toLowerCase() === input.toLowerCase()) {
      confidence += 0.3;
    }

    // Additional confidence for specific keywords
    const keywords = ['please', 'can you', 'would you'];
    for (const keyword of keywords) {
      if (input.toLowerCase().includes(keyword)) {
        confidence += 0.1;
      }
    }

    return Math.min(1, confidence);
  }

  private extractActionParameters(
    actionPattern: ActionPattern,
    match: RegExpMatchArray,
    extractedEntities: { parameters: Record<string, any>; primary_target: string }
  ): Record<string, any> {
    const parameters: Record<string, any> = {};

    // Copy relevant extracted entities
    if (actionPattern.parameters) {
      for (const param of actionPattern.parameters) {
        if (extractedEntities.parameters[param] !== undefined) {
          parameters[param] = extractedEntities.parameters[param];
        }
      }
    }

    // Extract additional parameters from match groups
    if (match.length > 1 && match[1]) {
      parameters.raw_parameter = match[1].trim();
    }

    return parameters;
  }

  private inferFromContext(
    input: string,
    extractedEntities: { parameters: Record<string, any>; primary_target: string }
  ): ClassifiedIntent {
    // Default to 'set' action if parameters are present
    if (Object.keys(extractedEntities.parameters).length > 0) {
      return {
        action: 'set',
        target: extractedEntities.primary_target,
        confidence: 0.5,
        parameters: extractedEntities.parameters,
        raw_input: input
      };
    }

    // Default to 'query' for question-like inputs
    if (input.match(/^(what|when|where|who|how|why)/i)) {
      return {
        action: 'query',
        target: extractedEntities.primary_target || 'system',
        confidence: 0.6,
        parameters: {},
        raw_input: input
      };
    }

    // Fallback with low confidence
    return {
      action: 'unknown',
      target: extractedEntities.primary_target || 'system',
      confidence: 0.3,
      parameters: {},
      raw_input: input
    };
  }
}
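Usage sketch (not part of this commit; the input string and entity ID are assumptions): classify takes the raw command together with the extractor's output, so calling it directly looks like this:

const classifier = new IntentClassifier();

const intent = await classifier.classify('Turn on the kitchen light', {
  primary_target: 'light.kitchen',
  parameters: {}
});
// → action: 'turn_on', target: 'light.kitchen', with a confidence derived from
//   how much of the input the matched pattern covers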
src/ai/nlp/processor.ts (new file, 132 lines)
@@ -0,0 +1,132 @@
import { AIIntent, AIContext, AIConfidence, AIError } from '../types/index.js';
import { EntityExtractor } from './entity-extractor.js';
import { IntentClassifier } from './intent-classifier.js';
import { ContextAnalyzer } from './context-analyzer.js';

export class NLPProcessor {
  private entityExtractor: EntityExtractor;
  private intentClassifier: IntentClassifier;
  private contextAnalyzer: ContextAnalyzer;

  constructor() {
    this.entityExtractor = new EntityExtractor();
    this.intentClassifier = new IntentClassifier();
    this.contextAnalyzer = new ContextAnalyzer();
  }

  async processCommand(
    input: string,
    context: AIContext
  ): Promise<{
    intent: AIIntent;
    confidence: AIConfidence;
    error?: AIError;
  }> {
    try {
      // Extract entities from the input
      const entities = await this.entityExtractor.extract(input);

      // Classify the intent
      const intent = await this.intentClassifier.classify(input, entities);

      // Analyze context relevance
      const contextRelevance = await this.contextAnalyzer.analyze(intent, context);

      // Calculate confidence scores
      const confidence: AIConfidence = {
        overall: (intent.confidence + entities.confidence + contextRelevance.confidence) / 3,
        intent: intent.confidence,
        entities: entities.confidence,
        context: contextRelevance.confidence
      };

      // Create structured intent
      const structuredIntent: AIIntent = {
        action: intent.action,
        target: entities.primary_target,
        parameters: {
          ...entities.parameters,
          ...intent.parameters,
          context_parameters: contextRelevance.relevant_params
        },
        raw_input: input
      };

      return {
        intent: structuredIntent,
        confidence
      };
    } catch (error: unknown) {
      const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
      return {
        intent: {
          action: 'error',
          target: 'system',
          parameters: {},
          raw_input: input
        },
        confidence: {
          overall: 0,
          intent: 0,
          entities: 0,
          context: 0
        },
        error: {
          code: 'NLP_PROCESSING_ERROR',
          message: errorMessage,
          suggestion: 'Please try rephrasing your command',
          recovery_options: [
            'Use simpler language',
            'Break down the command into smaller parts',
            'Specify the target device explicitly'
          ],
          context
        }
      };
    }
  }

  async validateIntent(
    intent: AIIntent,
    confidence: AIConfidence,
    threshold = 0.7
  ): Promise<boolean> {
    return (
      confidence.overall >= threshold &&
      confidence.intent >= threshold &&
      confidence.entities >= threshold &&
      confidence.context >= threshold
    );
  }

  async suggestCorrections(
    input: string,
    error: AIError
  ): Promise<string[]> {
    // Implement correction suggestions based on the error
    const suggestions: string[] = [];

    if (error.code === 'ENTITY_NOT_FOUND') {
      suggestions.push(
        'Try specifying the device name more clearly',
        'Use the exact device name from your Home Assistant setup'
      );
    }

    if (error.code === 'AMBIGUOUS_INTENT') {
      suggestions.push(
        'Please specify what you want to do with the device',
        'Use action words like "turn on", "set", "adjust"'
      );
    }

    if (error.code === 'CONTEXT_MISMATCH') {
      suggestions.push(
        'Specify the location if referring to a device',
        'Clarify which device you mean in the current context'
      );
    }

    return suggestions;
  }
}
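Usage sketch (not part of this commit; import paths and the sample context are assumptions): the extract → classify → analyze pipeline is wrapped by processCommand, so the end-to-end call from application code is:

import { NLPProcessor } from './src/ai/nlp/processor.js';   // path depends on the caller's location
import { AIContext } from './src/ai/types/index.js';

const processor = new NLPProcessor();

const context: AIContext = {
  user_id: 'user-1',
  session_id: 'session-1',
  timestamp: new Date().toISOString(),
  location: 'living room',
  previous_actions: [],
  environment_state: {}
};

const { intent, confidence, error } = await processor.processCommand(
  'Turn on the living room light',
  context
);

if (!error && (await processor.validateIntent(intent, confidence))) {
  // hand the structured intent to the execution layer (e.g. the /execute endpoint)
}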
src/ai/templates/prompt-templates.ts (new file, 135 lines)
@@ -0,0 +1,135 @@
import { AIModel } from '../types/index.js';

interface PromptTemplate {
  system: string;
  user: string;
  examples: Array<{
    user: string;
    assistant: string;
  }>;
}

interface PromptVariables {
  device_name?: string;
  location?: string;
  action?: string;
  parameters?: Record<string, any>;
  context?: Record<string, any>;
  [key: string]: any;
}

class PromptTemplates {
  private templates: Record<AIModel, PromptTemplate>;

  constructor() {
    this.templates = {
      [AIModel.CLAUDE]: {
        system: `You are Claude, an AI assistant specialized in home automation control through natural language.
Your role is to interpret user commands and translate them into specific device control actions.
Always maintain context awareness and consider user preferences and patterns.
Provide clear, concise responses and suggest relevant follow-up actions.`,
        user: `Control the {device_name} in the {location} by {action} with parameters: {parameters}.
Current context: {context}`,
        examples: [
          {
            user: "Turn on the living room lights",
            assistant: "I'll turn on the lights in the living room. Would you like me to set a specific brightness level?"
          },
          {
            user: "Set the temperature to 72 degrees",
            assistant: "I'll set the temperature to 72°F. I'll monitor the temperature and let you know when it reaches the target."
          }
        ]
      },
      [AIModel.GPT4]: {
        system: `You are a home automation assistant powered by GPT-4.
Focus on precise command interpretation and execution.
Maintain high accuracy in device control and parameter settings.
Provide feedback on action success and system state changes.`,
        user: `Command: {action} {device_name} in {location}
Parameters: {parameters}
Context: {context}`,
        examples: [
          {
            user: "Dim the bedroom lights to 50%",
            assistant: "Setting bedroom light brightness to 50%. The change has been applied successfully."
          },
          {
            user: "Start the evening routine",
            assistant: "Initiating evening routine: dimming lights, adjusting temperature, and enabling security system."
          }
        ]
      },
      [AIModel.CUSTOM]: {
        system: `Custom home automation assistant configuration.
Adapt to user preferences and patterns.
Learn from interactions and optimize responses.
Provide detailed feedback and suggestions.`,
        user: `Action requested: {action}
Target device: {device_name}
Location: {location}
Parameters: {parameters}
Current context: {context}`,
        examples: [
          {
            user: "Make it cooler in here",
            assistant: "Based on your preferences, I'll lower the temperature by 2 degrees. Current temperature is 74°F, adjusting to 72°F."
          },
          {
            user: "Set up movie mode",
            assistant: "Activating movie mode: dimming lights to 20%, closing blinds, setting TV input to HDMI 1, and adjusting sound system."
          }
        ]
      }
    };
  }

  getTemplate(model: AIModel): PromptTemplate {
    return this.templates[model];
  }

  formatPrompt(model: AIModel, variables: PromptVariables): string {
    const template = this.getTemplate(model);
    let prompt = template.user;

    // Replace variables in the prompt
    for (const [key, value] of Object.entries(variables)) {
      const placeholder = `{${key}}`;
      if (typeof value === 'object') {
        prompt = prompt.replace(placeholder, JSON.stringify(value));
      } else {
        prompt = prompt.replace(placeholder, String(value));
      }
    }

    return prompt;
  }

  getSystemPrompt(model: AIModel): string {
    return this.templates[model].system;
  }

  getExamples(model: AIModel): Array<{ user: string; assistant: string }> {
    return this.templates[model].examples;
  }

  addExample(
    model: AIModel,
    example: { user: string; assistant: string }
  ): void {
    this.templates[model].examples.push(example);
  }

  updateSystemPrompt(model: AIModel, newPrompt: string): void {
    this.templates[model].system = newPrompt;
  }

  createCustomTemplate(
    model: AIModel.CUSTOM,
    template: PromptTemplate
  ): void {
    this.templates[model] = template;
  }
}

export default new PromptTemplates();
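Usage sketch (not part of this commit; import paths and the sample variables are assumptions): the singleton export is used by filling the per-model user template and pairing it with the matching system prompt:

import promptTemplates from './src/ai/templates/prompt-templates.js'; // path depends on the caller's location
import { AIModel } from './src/ai/types/index.js';

const system = promptTemplates.getSystemPrompt(AIModel.CLAUDE);
const user = promptTemplates.formatPrompt(AIModel.CLAUDE, {
  device_name: 'living room light',
  location: 'living room',
  action: 'turn_on',
  parameters: { brightness: 80 },     // serialized to JSON inside the template
  context: { time_of_day: 'evening' }
});
// `system` and `user` would then be sent to the selected model's API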
src/ai/types/index.ts (new file, 123 lines)
@@ -0,0 +1,123 @@
import { z } from 'zod';

// AI Model Types
export enum AIModel {
  CLAUDE = 'claude',
  GPT4 = 'gpt4',
  CUSTOM = 'custom'
}

// AI Confidence Level
export interface AIConfidence {
  overall: number;
  intent: number;
  entities: number;
  context: number;
}

// AI Intent
export interface AIIntent {
  action: string;
  target: string;
  parameters: Record<string, any>;
  raw_input: string;
}

// AI Context
export interface AIContext {
  user_id: string;
  session_id: string;
  timestamp: string;
  location: string;
  previous_actions: AIIntent[];
  environment_state: Record<string, any>;
}

// AI Response
export interface AIResponse {
  natural_language: string;
  structured_data: {
    success: boolean;
    action_taken: string;
    entities_affected: string[];
    state_changes: Record<string, any>;
  };
  next_suggestions: string[];
  confidence: AIConfidence;
  context: AIContext;
}

// AI Error
export interface AIError {
  code: string;
  message: string;
  suggestion: string;
  recovery_options: string[];
  context: AIContext;
}

// Rate Limiting
export interface AIRateLimit {
  requests_per_minute: number;
  requests_per_hour: number;
  concurrent_requests: number;
  model_specific_limits: Record<AIModel, {
    requests_per_minute: number;
    requests_per_hour: number;
  }>;
}

// Zod Schemas
export const AIConfidenceSchema = z.object({
  overall: z.number().min(0).max(1),
  intent: z.number().min(0).max(1),
  entities: z.number().min(0).max(1),
  context: z.number().min(0).max(1)
});

export const AIIntentSchema = z.object({
  action: z.string(),
  target: z.string(),
  parameters: z.record(z.any()),
  raw_input: z.string()
});

export const AIContextSchema = z.object({
  user_id: z.string(),
  session_id: z.string(),
  timestamp: z.string(),
  location: z.string(),
  previous_actions: z.array(AIIntentSchema),
  environment_state: z.record(z.any())
});

export const AIResponseSchema = z.object({
  natural_language: z.string(),
  structured_data: z.object({
    success: z.boolean(),
    action_taken: z.string(),
    entities_affected: z.array(z.string()),
    state_changes: z.record(z.any())
  }),
  next_suggestions: z.array(z.string()),
  confidence: AIConfidenceSchema,
  context: AIContextSchema
});

export const AIErrorSchema = z.object({
  code: z.string(),
  message: z.string(),
  suggestion: z.string(),
  recovery_options: z.array(z.string()),
  context: AIContextSchema
});

export const AIRateLimitSchema = z.object({
  requests_per_minute: z.number(),
  requests_per_hour: z.number(),
  concurrent_requests: z.number(),
  model_specific_limits: z.record(z.object({
    requests_per_minute: z.number(),
    requests_per_hour: z.number()
  }))
});
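Usage sketch (not part of this commit; the sample payload is an assumption): the Zod schemas mirror the interfaces, so untrusted input can be validated before it is treated as one of these types:

import { AIContextSchema } from './src/ai/types/index.js'; // path depends on the caller's location

const result = AIContextSchema.safeParse({
  user_id: 'user-1',
  session_id: 'session-1',
  timestamp: new Date().toISOString(),
  location: 'kitchen',
  previous_actions: [],
  environment_state: {}
});
// result.success === true here; an invalid payload yields result.error with per-field issues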