Compare commits

...

10 Commits

Author SHA1 Message Date
mudler
7d1e9647d9 chore: return more results to the LLM of the action that was done
Signed-off-by: mudler <mudler@localai.io>
2025-04-14 19:55:00 +02:00
Richard Palethorpe
77189b6114 fix(test): Encourage LLM to plan multiple searches (#36)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-04-14 17:35:19 +02:00
Richard Palethorpe
c32d315910 fix(ui): Proxy avatars endpoint in dev mode (#32)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-04-14 16:09:57 +02:00
Richard Palethorpe
606ffd8275 fix(ui): Don't try to pass unserializable Go objects to status UI (#28)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-04-14 16:09:42 +02:00
mudler
601dba3fc4 chore(tests): try to be more expressive
Signed-off-by: mudler <mudler@localai.io>
2025-04-14 16:08:54 +02:00
mudler
00ab476a77 chore(tests): try to be more extensive in timeouts
Signed-off-by: mudler <mudler@localai.io>
2025-04-14 12:37:31 +02:00
Ettore Di Giacinto
906079cbbb Update README.md
2025-04-14 10:50:19 +02:00
Ettore Di Giacinto
808d9c981c Update docker-compose.intel.yaml
2025-04-13 22:33:03 +02:00
Ettore Di Giacinto
2b79c99dd7 chore(README): reorganize docker compose files
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-13 22:31:33 +02:00
Ettore Di Giacinto
77905ed3cd Update README.md
2025-04-13 22:10:17 +02:00
9 changed files with 130 additions and 99 deletions

README.md

@@ -49,10 +49,10 @@ cd LocalAGI
 docker compose up
 # NVIDIA GPU setup
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 # Intel GPU setup (for Intel Arc and integrated GPUs)
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 # Start with a specific model (see available models in models.localai.io, or localai.io to use any model in huggingface)
 MODEL_NAME=gemma-3-12b-it docker compose up
@@ -61,11 +61,40 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 ```
 Now you can access and manage your agents at [http://localhost:8080](http://localhost:8080)
+## 📚🆕 Local Stack Family
+🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:
+<table>
+  <tr>
+    <td width="50%" valign="top">
+      <a href="https://github.com/mudler/LocalAI">
+        <img src="https://raw.githubusercontent.com/mudler/LocalAI/refs/heads/rebranding/core/http/static/logo_horizontal.png" width="300" alt="LocalAI Logo">
+      </a>
+    </td>
+    <td width="50%" valign="top">
+      <h3><a href="https://github.com/mudler/LocalAI">LocalAI</a></h3>
+      <p>LocalAI is the free, Open Source OpenAI alternative. It acts as a drop-in replacement REST API compatible with the OpenAI API specifications for local AI inferencing. It does not require a GPU.</p>
+    </td>
+  </tr>
+  <tr>
+    <td width="50%" valign="top">
+      <a href="https://github.com/mudler/LocalRecall">
+        <img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
+      </a>
+    </td>
+    <td width="50%" valign="top">
+      <h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
+      <p>A RESTful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
+    </td>
+  </tr>
+</table>
 ## 🖥️ Hardware Configurations
 LocalAGI supports multiple hardware configurations through Docker Compose profiles:
@@ -81,7 +110,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profiles:
 - Uses CUDA for acceleration
 - Best for high-performance inference
 - Supports text, multimodal, and image generation models
-- Run with: `docker compose --profile nvidia up`
+- Run with: `docker compose -f docker-compose.nvidia.yaml up`
 - Default models:
   - Text: `arcee-agent`
   - Multimodal: `minicpm-v-2_6`
@@ -97,7 +126,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profiles:
 - Uses SYCL for acceleration
 - Best for Intel-based systems
 - Supports text, multimodal, and image generation models
-- Run with: `docker compose --profile intel up`
+- Run with: `docker compose -f docker-compose.intel.yaml up`
 - Default models:
   - Text: `arcee-agent`
   - Multimodal: `minicpm-v-2_6`
@@ -120,13 +149,13 @@ MODEL_NAME=gemma-3-12b-it docker compose up
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=flux.1-dev \
-docker compose --profile nvidia up
+docker compose -f docker-compose.nvidia.yaml up
 # Intel GPU with custom models
 MODEL_NAME=gemma-3-12b-it \
 MULTIMODAL_MODEL=minicpm-v-2_6 \
 IMAGE_MODEL=sd-1.5-ggml \
-docker compose --profile intel up
+docker compose -f docker-compose.intel.yaml up
 ```
 If no models are specified, it will use the defaults:


@@ -238,7 +238,7 @@ var _ = Describe("Agent test", func() {
 			defer agent.Stop()
 			result := agent.Ask(
-				types.WithText("plan a trip to San Francisco from Venice, Italy"),
+				types.WithText("Thoroughly plan a trip to San Francisco from Venice, Italy; check flight times, visa requirements and whether electrical items are allowed in cabin luggage."),
 			)
 			Expect(len(result.State)).To(BeNumerically(">", 1))
@@ -260,6 +260,7 @@ var _ = Describe("Agent test", func() {
 				WithLLMAPIURL(apiURL),
 				WithModel(testModel),
 				WithLLMAPIKey(apiKeyURL),
+				WithTimeout("10m"),
 				WithNewConversationSubscriber(func(m openai.ChatCompletionMessage) {
 					mu.Lock()
 					message = m
@@ -274,7 +275,7 @@ var _ = Describe("Agent test", func() {
 				EnableStandaloneJob,
 				EnableHUD,
 				WithPeriodicRuns("1s"),
-				WithPermanentGoal("use the new_conversation tool"),
+				WithPermanentGoal("use the new_conversation tool to initiate a conversation with the user"),
 				// EnableStandaloneJob,
 				// WithRandomIdentity(),
 			)
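Both test fixes lean on the same construction style: the agent is assembled from variadic `With...` options, so a longer timeout or a richer permanent goal is just one more argument. A minimal sketch of that functional-options pattern — the names, types, and default below are assumptions for illustration, not LocalAGI's actual definitions:

```go
// Sketch of the functional-options pattern implied by the WithTimeout /
// WithModel / WithPermanentGoal calls above. Illustrative only.
package main

import (
	"fmt"
	"time"
)

type agentConfig struct {
	model   string
	timeout time.Duration
	goal    string
}

// Option mutates the config and may reject bad input.
type Option func(*agentConfig) error

func WithModel(m string) Option {
	return func(c *agentConfig) error { c.model = m; return nil }
}

// WithTimeout takes a Go duration string such as "10m", mirroring the
// string-typed argument seen in the diff.
func WithTimeout(s string) Option {
	return func(c *agentConfig) error {
		d, err := time.ParseDuration(s)
		if err != nil {
			return err
		}
		c.timeout = d
		return nil
	}
}

func WithPermanentGoal(g string) Option {
	return func(c *agentConfig) error { c.goal = g; return nil }
}

func newAgentConfig(opts ...Option) (*agentConfig, error) {
	c := &agentConfig{timeout: time.Minute} // assumed default
	for _, o := range opts {
		if err := o(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, err := newAgentConfig(
		WithModel("arcee-agent"),
		WithTimeout("10m"),
		WithPermanentGoal("use the new_conversation tool to initiate a conversation with the user"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.model, c.timeout, c.goal)
}
```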

docker-compose.intel.yaml Normal file

@@ -0,0 +1,33 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    devices:
+      # On a system with integrated GPU and an Arc 770, this is the Arc 770
+      - /dev/dri/card1
+      - /dev/dri/renderD129
+    command:
+      - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-sd-1.5-ggml}
+      - granite-embedding-107m-multilingual
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi

docker-compose.nvidia.yaml Normal file

@@ -0,0 +1,31 @@
+services:
+  localai:
+    extends:
+      file: docker-compose.yaml
+      service: localai
+    environment:
+      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
+      - DEBUG=true
+    image: localai/localai:master-sycl-f32-ffmpeg-core
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+  localrecall:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall
+  localrecall-healthcheck:
+    extends:
+      file: docker-compose.yaml
+      service: localrecall-healthcheck
+  localagi:
+    extends:
+      file: docker-compose.yaml
+      service: localagi

docker-compose.yaml

@@ -7,8 +7,9 @@ services:
     # Image list (dockerhub): https://hub.docker.com/r/localai/localai
     image: localai/localai:master-ffmpeg-core
     command:
-      # - gemma-3-12b-it
       - ${MODEL_NAME:-arcee-agent}
+      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - ${IMAGE_MODEL:-flux.1-dev}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
@@ -24,44 +25,6 @@ services:
       - ./volumes/models:/build/models:cached
       - ./volumes/images:/tmp/generated/images
-  localai-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localai
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities: [gpu]
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-flux.1-dev}
-      - granite-embedding-107m-multilingual
-  localai-intel:
-    profiles: ["intel"]
-    environment:
-      - LOCALAI_SINGLE_ACTIVE_BACKEND=true
-      - DEBUG=true
-    extends:
-      service: localai
-    image: localai/localai:master-sycl-f32-ffmpeg-core
-    devices:
-      # On a system with integrated GPU and an Arc 770, this is the Arc 770
-      - /dev/dri/card1
-      - /dev/dri/renderD129
-    command:
-      - ${MODEL_NAME:-arcee-agent}
-      - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - ${IMAGE_MODEL:-sd-1.5-ggml}
-      - granite-embedding-107m-multilingual
   localrecall:
     image: quay.io/mudler/localrecall:main
     ports:
@@ -97,6 +60,8 @@ services:
     #image: quay.io/mudler/localagi:master
     environment:
       - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
+      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
+      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
       - LOCALAGI_LLM_API_URL=http://localai:8080
       #- LOCALAGI_LLM_API_KEY=sk-1234567890
       - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
@@ -106,32 +71,4 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
     volumes:
       - ./volumes/localagi/:/pool
-  localagi-nvidia:
-    profiles: ["nvidia"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
-  localagi-intel:
-    profiles: ["intel"]
-    extends:
-      service: localagi
-    environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-arcee-agent}
-      - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
-      - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
-      - LOCALAGI_LLM_API_URL=http://localai:8080
-      - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
-      - LOCALAGI_STATE_DIR=/pool
-      - LOCALAGI_TIMEOUT=5m
-      - LOCALAGI_ENABLE_CONVERSATIONS_LOGGING=false
-      - ./volumes/localagi/:/pool


@@ -128,11 +128,13 @@ func (g *GithubPRReviewer) Run(ctx context.Context, params types.ActionParams) (
 	}
 	actionResult := fmt.Sprintf(
-		"Pull request https://github.com/%s/%s/pull/%d reviewed successfully with status: %s",
+		"Pull request https://github.com/%s/%s/pull/%d reviewed successfully with status: %s, comments: %v, message: %s",
 		result.Owner,
 		result.Repository,
 		result.PRNumber,
 		strings.ToLower(result.ReviewAction),
+		result.Comments,
+		result.ReviewComment,
 	)
 	return types.ActionResult{Result: actionResult}, nil
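This implements the top commit ("return more results to the LLM of the action that was done"): the review comments and closing message now ride along in the action-result text instead of being dropped. A self-contained sketch of what `%v` produces for such a slice — the `Comment` type here is a stand-in, not the project's actual type:

```go
// Illustration of the text shape the widened format string feeds back to
// the LLM: %v renders a slice of comment-like values inline.
package main

import "fmt"

type Comment struct {
	Path string
	Line int
	Body string
}

func main() {
	comments := []Comment{
		{Path: "main.go", Line: 42, Body: "consider handling the error"},
	}
	result := fmt.Sprintf(
		"reviewed successfully with status: %s, comments: %v, message: %s",
		"comment", comments, "looks good overall",
	)
	fmt.Println(result)
	// Prints (one line): reviewed successfully with status: comment,
	//   comments: [{main.go 42 consider handling the error}], message: looks good overall
}
```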


@@ -1,13 +1,12 @@
 import { useState, useEffect } from 'react';
-import { useParams, Link, useNavigate } from 'react-router-dom';
+import { useParams, Link } from 'react-router-dom';

 function AgentStatus() {
   const { name } = useParams();
-  const navigate = useNavigate();
   const [statusData, setStatusData] = useState(null);
   const [loading, setLoading] = useState(true);
   const [error, setError] = useState(null);
-  const [eventSource, setEventSource] = useState(null);
+  const [_eventSource, setEventSource] = useState(null);
   const [liveUpdates, setLiveUpdates] = useState([]);

   // Update document title
@@ -49,7 +48,7 @@ function AgentStatus() {
         const data = JSON.parse(event.data);
         setLiveUpdates(prev => [data, ...prev.slice(0, 19)]); // Keep last 20 updates
       } catch (err) {
-        console.error('Error parsing SSE data:', err);
+        setLiveUpdates(prev => [event.data, ...prev.slice(0, 19)]);
       }
     });
@@ -129,23 +128,9 @@ function AgentStatus() {
             <h2 className="text-sm font-semibold mb-2">Agent Action:</h2>
             <div className="status-details">
               <div className="status-row">
-                <span className="status-label">Result:</span>
-                <span className="status-value">{formatValue(item.Result)}</span>
+                <span className="status-label">{index}</span>
+                <span className="status-value">{formatValue(item)}</span>
               </div>
-              <div className="status-row">
-                <span className="status-label">Action:</span>
-                <span className="status-value">{formatValue(item.Action)}</span>
-              </div>
-              <div className="status-row">
-                <span className="status-label">Parameters:</span>
-                <span className="status-value pre-wrap">{formatValue(item.Params)}</span>
-              </div>
-              {item.Reasoning && (
-                <div className="status-row">
-                  <span className="status-label">Reasoning:</span>
-                  <span className="status-value reasoning">{formatValue(item.Reasoning)}</span>
-                </div>
-              )}
             </div>
           </div>
         </div>


@@ -30,6 +30,7 @@ export default defineConfig(({ mode }) => {
         '/status': backendUrl,
         '/action': backendUrl,
         '/actions': backendUrl,
+        '/avatars': backendUrl
       }
     }
   }
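The one-line fix registers `/avatars` with Vite's dev-server proxy, so avatar requests reach the Go backend instead of failing in dev mode. For intuition, the same prefix routing expressed as a tiny standalone Go reverse proxy — the addresses and ports are illustrative, not the project's actual dev setup:

```go
// Standalone sketch of prefix-based proxying, analogous to what the Vite
// dev server does with the entries above. Addresses are assumptions.
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	backend, err := url.Parse("http://localhost:3000") // assumed backend address
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)

	mux := http.NewServeMux()
	// Only the listed prefixes are forwarded; everything else is served
	// by the frontend dev server itself.
	for _, prefix := range []string{"/status", "/action", "/actions", "/avatars"} {
		mux.Handle(prefix, proxy)
		mux.Handle(prefix+"/", proxy)
	}
	log.Fatal(http.ListenAndServe(":5173", mux))
}
```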


@@ -4,6 +4,7 @@ import (
 	"crypto/subtle"
 	"embed"
 	"errors"
+	"fmt"
 	"math/rand"
 	"net/http"
 	"path/filepath"
@@ -238,9 +239,20 @@ func (app *App) registerRoutes(pool *state.AgentPool, webapp *fiber.App) {
 			history = &state.Status{ActionResults: []types.ActionState{}}
 		}
+		entries := []string{}
+		for _, h := range Reverse(history.Results()) {
+			entries = append(entries, fmt.Sprintf(
+				"Result: %v Action: %v Params: %v Reasoning: %v",
+				h.Result,
+				h.Action.Definition().Name,
+				h.Params,
+				h.Reasoning,
+			))
+		}
 		return c.JSON(fiber.Map{
 			"Name": c.Params("name"),
-			"History": Reverse(history.Results()),
+			"History": entries,
 		})
 	})
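This is the server half of the AgentStatus fix above: instead of handing `Reverse(history.Results())` straight to the JSON encoder — where a live action object can carry values `encoding/json` refuses to marshal — the handler flattens each entry to a plain string first. A minimal, self-contained illustration with assumed types:

```go
// Why flattening helps: encoding/json cannot marshal funcs or channels,
// which may hide inside a live action object. Types here are stand-ins.
package main

import (
	"encoding/json"
	"fmt"
)

type actionState struct {
	Result string
	Run    func() error // unserializable field, stand-in for a live action
}

func main() {
	h := actionState{Result: "ok", Run: func() error { return nil }}

	// Marshaling the raw object fails outright.
	if _, err := json.Marshal(h); err != nil {
		fmt.Println("direct marshal fails:", err) // json: unsupported type: func() error
	}

	// Flatten to a plain string first, as the route handler now does.
	entry := fmt.Sprintf("Result: %v", h.Result)
	b, _ := json.Marshal(map[string][]string{"History": {entry}})
	fmt.Println(string(b)) // {"History":["Result: ok"]}
}
```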