Compare commits
10 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7d1e9647d9 | |
| | 77189b6114 | |
| | c32d315910 | |
| | 606ffd8275 | |
| | 601dba3fc4 | |
| | 00ab476a77 | |
| | 906079cbbb | |
| | 808d9c981c | |
| | 2b79c99dd7 | |
| | 77905ed3cd | |
README.md (43 changes)
````diff
@@ -49,10 +49,10 @@ cd LocalAGI
 docker compose up
 
 # NVIDIA GPU setup
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU setup (for Intel Arc and integrated GPUs)
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 
 # Start with a specific model (see available models in models.localai.io, or localai.io to use any model in huggingface)
 MODEL_NAME=gemma-3-12b-it docker compose up
@@ -61,11 +61,40 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 ```
 
 Now you can access and manage your agents at [http://localhost:8080](http://localhost:8080)
 
+## 📚🆕 Local Stack Family
+
+🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:
+
+<table>
+<tr>
+<td width="50%" valign="top">
+<a href="https://github.com/mudler/LocalAI">
+<img src="https://raw.githubusercontent.com/mudler/LocalAI/refs/heads/rebranding/core/http/static/logo_horizontal.png" width="300" alt="LocalAI Logo">
+</a>
+</td>
+<td width="50%" valign="top">
+<h3><a href="https://github.com/mudler/LocalAI">LocalAI</a></h3>
+<p>LocalAI is the free, Open Source OpenAI alternative. LocalAI acts as a drop-in replacement REST API that's compatible with OpenAI API specifications for local AI inferencing. Does not require GPU.</p>
+</td>
+</tr>
+<tr>
+<td width="50%" valign="top">
+<a href="https://github.com/mudler/LocalRecall">
+<img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
+</a>
+</td>
+<td width="50%" valign="top">
+<h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
+<p>A REST-ful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
+</td>
+</tr>
+</table>
+
 ## 🖥️ Hardware Configurations
 
 LocalAGI supports multiple hardware configurations through Docker Compose profiles:
@@ -81,7 +110,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profiles:
 - Uses CUDA for acceleration
 - Best for high-performance inference
 - Supports text, multimodal, and image generation models
-- Run with: `docker compose --profile nvidia up`
+- Run with: `docker compose -f docker-compose.nvidia.yaml up`
 - Default models:
   - Text: `arcee-agent`
   - Multimodal: `minicpm-v-2_6`
@@ -97,7 +126,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profiles:
 - Uses SYCL for acceleration
 - Best for Intel-based systems
 - Supports text, multimodal, and image generation models
-- Run with: `docker compose --profile intel up`
+- Run with: `docker compose -f docker-compose.intel.yaml up`
 - Default models:
   - Text: `arcee-agent`
   - Multimodal: `minicpm-v-2_6`
@@ -120,13 +149,13 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 
 # Intel GPU with custom models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=sd-1.5-ggml \
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 ```
 
 If no models are specified, it will use the defaults:
````
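Net effect of the README changes: hardware selection moves from Compose profiles to dedicated Compose files, with the commands otherwise unchanged:

```sh
# Before: hardware selected via Compose profiles
docker compose --profile nvidia up
docker compose --profile intel up

# After: hardware selected via dedicated Compose files
docker compose -f docker-compose.nvidia.yaml up
docker compose -f docker-compose.intel.yaml up
```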
```diff
@@ -238,7 +238,7 @@ var _ = Describe("Agent test", func() {
 		defer agent.Stop()
 
 		result := agent.Ask(
-			types.WithText("plan a trip to San Francisco from Venice, Italy"),
+			types.WithText("Thoroughly plan a trip to San Francisco from Venice, Italy; check flight times, visa requirements and whether electrical items are allowed in cabin luggage."),
 		)
 		Expect(len(result.State)).To(BeNumerically(">", 1))
 
@@ -260,6 +260,7 @@ var _ = Describe("Agent test", func() {
 		WithLLMAPIURL(apiURL),
 		WithModel(testModel),
 		WithLLMAPIKey(apiKeyURL),
+		WithTimeout("10m"),
 		WithNewConversationSubscriber(func(m openai.ChatCompletionMessage) {
 			mu.Lock()
 			message = m
@@ -274,7 +275,7 @@ var _ = Describe("Agent test", func() {
 		EnableStandaloneJob,
 		EnableHUD,
 		WithPeriodicRuns("1s"),
-		WithPermanentGoal("use the new_conversation tool"),
+		WithPermanentGoal("use the new_conversation tool to initiate a conversation with the user"),
 		// EnableStandaloneJob,
 		// WithRandomIdentity(),
 	)
```
docker-compose.intel.yaml (new file, 33 lines)
```diff
@@ -0,0 +1,33 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    devices:
+      # On a system with integrated GPU and an Arc 770, this is the Arc 770
+      - /dev/dri/card1
+      - /dev/dri/renderD129
+    command:
+      - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-sd-1.5-ggml}
+      - granite-embedding-107m-multilingual
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
```
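The overlay files lean on Compose's `extends` keyword, inheriting each service from the base `docker-compose.yaml` and overriding only the hardware-specific bits (image, devices, default models). A quick way to sanity-check an overlay before starting it, assuming the files sit side by side as in this PR (`docker compose config` prints the fully merged configuration):

```sh
# Render the merged Intel configuration without starting containers
docker compose -f docker-compose.intel.yaml config

# Start the Intel stack, overriding the default text model
MODEL_NAME=gemma-3-12b-it docker compose -f docker-compose.intel.yaml up
```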
docker-compose.nvidia.yaml (new file, 31 lines)
```diff
@@ -0,0 +1,31 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi
```
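The GPU request above uses Compose's `deploy.resources.reservations.devices` syntax; handing an NVIDIA GPU to a container this way requires the NVIDIA Container Toolkit to be installed on the host.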
docker-compose.yaml

```diff
@@ -7,8 +7,9 @@ services:
     # Image list (dockerhub): https://hub.docker.com/r/localai/localai
     image: localai/localai:master-ffmpeg-core
     command:
-      # - gemma-3-12b-it
       - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-flux.1-dev}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
@@ -24,44 +25,6 @@ services:
       - ./volumes/models:/build/models:cached
       - ./volumes/images:/tmp/generated/images
 
-  localai-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localai
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities: [gpu]
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-flux.1-dev}
-      - granite-embedding-107m-multilingual
-
-  localai-intel:
-    profiles: ["intel"]
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    extends:
-      service: localai
-    image: localai/localai:master-sycl-f32-ffmpeg-core
-    devices:
-      # On a system with integrated GPU and an Arc 770, this is the Arc 770
-      - /dev/dri/card1
-      - /dev/dri/renderD129
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-sd-1.5-ggml}
-      - granite-embedding-107m-multilingual
-
   localrecall:
     image: quay.io/mudler/localrecall:main
     ports:
@@ -97,6 +60,8 @@ services:
     #image: quay.io/mudler/localagi:master
     environment:
       - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
       - LOCALAGI_LLM_API_URL=http://localai:8080
       #- LOCALAGI_LLM_API_KEY=sk-1234567890
       - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
@@ -106,32 +71,4 @@ services:
     extra_hosts:
      - "host.docker.internal:host-gateway"
     volumes:
      - ./volumes/localagi/:/pool
-
-  localagi-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
-
-  localagi-intel:
-    profiles: ["intel"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
```
```diff
@@ -128,11 +128,13 @@ func (g *GithubPRReviewer) Run(ctx context.Context, params types.ActionParams) (
 	}
 
 	actionResult := fmt.Sprintf(
-		"Pull request https://github.com/%s/%s/pull/%d reviewed successfully with status: %s",
+		"Pull request https://github.com/%s/%s/pull/%d reviewed successfully with status: %s, comments: %v, message: %s",
 		result.Owner,
 		result.Repository,
 		result.PRNumber,
 		strings.ToLower(result.ReviewAction),
+		result.Comments,
+		result.ReviewComment,
 	)
 
 	return types.ActionResult{Result: actionResult}, nil
```
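The widened format string feeds `result.Comments` through `%v`, Go's default-format verb, so whatever structure the comments slice has is rendered inline in the action result. A standalone sketch of that behavior (the `Comment` type here is hypothetical; the real field types live in the reviewer's result struct):

```go
package main

import "fmt"

// Comment is a hypothetical stand-in for the element type of result.Comments.
type Comment struct {
	Path string
	Body string
}

func main() {
	comments := []Comment{{Path: "main.go", Body: "handle this error"}}
	msg := fmt.Sprintf(
		"reviewed successfully with status: %s, comments: %v, message: %s",
		"approve", comments, "LGTM with one nit",
	)
	// Prints: reviewed successfully with status: approve, comments: [{main.go handle this error}], message: LGTM with one nit
	fmt.Println(msg)
}
```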
```diff
@@ -1,13 +1,12 @@
 import { useState, useEffect } from 'react';
-import { useParams, Link, useNavigate } from 'react-router-dom';
+import { useParams, Link } from 'react-router-dom';
 
 function AgentStatus() {
   const { name } = useParams();
-  const navigate = useNavigate();
   const [statusData, setStatusData] = useState(null);
   const [loading, setLoading] = useState(true);
   const [error, setError] = useState(null);
-  const [eventSource, setEventSource] = useState(null);
+  const [_eventSource, setEventSource] = useState(null);
   const [liveUpdates, setLiveUpdates] = useState([]);
 
   // Update document title
@@ -49,7 +48,7 @@ function AgentStatus() {
       const data = JSON.parse(event.data);
       setLiveUpdates(prev => [data, ...prev.slice(0, 19)]); // Keep last 20 updates
     } catch (err) {
-      console.error('Error parsing SSE data:', err);
+      setLiveUpdates(prev => [event.data, ...prev.slice(0, 19)]);
     }
   });
 
@@ -129,23 +128,9 @@ function AgentStatus() {
   <h2 className="text-sm font-semibold mb-2">Agent Action:</h2>
   <div className="status-details">
     <div className="status-row">
-      <span className="status-label">Result:</span>
-      <span className="status-value">{formatValue(item.Result)}</span>
+      <span className="status-label">{index}</span>
+      <span className="status-value">{formatValue(item)}</span>
     </div>
-    <div className="status-row">
-      <span className="status-label">Action:</span>
-      <span className="status-value">{formatValue(item.Action)}</span>
-    </div>
-    <div className="status-row">
-      <span className="status-label">Parameters:</span>
-      <span className="status-value pre-wrap">{formatValue(item.Params)}</span>
-    </div>
-    {item.Reasoning && (
-      <div className="status-row">
-        <span className="status-label">Reasoning:</span>
-        <span className="status-value reasoning">{formatValue(item.Reasoning)}</span>
-      </div>
-    )}
   </div>
 </div>
</div>
```
```diff
@@ -30,6 +30,7 @@ export default defineConfig(({ mode }) => {
       '/status': backendUrl,
       '/action': backendUrl,
       '/actions': backendUrl,
+      '/avatars': backendUrl
     }
   }
 }
```
```diff
@@ -4,6 +4,7 @@ import (
 	"crypto/subtle"
 	"embed"
 	"errors"
+	"fmt"
 	"math/rand"
 	"net/http"
 	"path/filepath"
@@ -238,9 +239,20 @@ func (app *App) registerRoutes(pool *state.AgentPool, webapp *fiber.App) {
 			history = &state.Status{ActionResults: []types.ActionState{}}
 		}
 
+		entries := []string{}
+		for _, h := range Reverse(history.Results()) {
+			entries = append(entries, fmt.Sprintf(
+				"Result: %v Action: %v Params: %v Reasoning: %v",
+				h.Result,
+				h.Action.Definition().Name,
+				h.Params,
+				h.Reasoning,
+			))
+		}
+
 		return c.JSON(fiber.Map{
 			"Name": c.Params("name"),
-			"History": Reverse(history.Results()),
+			"History": entries,
 		})
 	})
 
```
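With this change the status route returns pre-flattened display strings instead of raw action structs, which matches the simplified rendering in the frontend hunk above. A self-contained sketch of the same flattening loop (the `ActionState` shape here is a hypothetical stand-in for LocalAGI's `types.ActionState`; in the real handler the action name comes from `h.Action.Definition().Name`):

```go
package main

import "fmt"

// ActionState is a hypothetical stand-in for LocalAGI's types.ActionState.
type ActionState struct {
	Result    string
	Action    string
	Params    map[string]any
	Reasoning string
}

func main() {
	history := []ActionState{
		{Result: "ok", Action: "new_conversation", Params: map[string]any{"text": "hi"}, Reasoning: "permanent goal"},
	}
	entries := []string{}
	for _, h := range history {
		// One display string per action, same format the handler returns under "History".
		entries = append(entries, fmt.Sprintf(
			"Result: %v Action: %v Params: %v Reasoning: %v",
			h.Result, h.Action, h.Params, h.Reasoning,
		))
	}
	for _, e := range entries {
		fmt.Println(e)
	}
}
```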