Makefile
@@ -9,7 +9,7 @@ cleanup-tests:
 	docker compose down

 tests: prepare-tests
-	LOCALAGI_MODEL="openthinker-7b" LOCALAI_API_URL="http://localhost:8081" LOCALAGI_API_URL="http://localhost:8080" $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --fail-fast -v -r ./...
+	LOCALAGI_MODEL="gemma-3-4b-it" LOCALAI_API_URL="http://localhost:8081" LOCALAGI_API_URL="http://localhost:8080" $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --fail-fast -v -r ./...

 run-nokb:
 	$(MAKE) run KBDISABLEINDEX=true
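The `tests` recipe is self-contained, so the suite can also be run outside `make`. A minimal sketch, assuming LocalAI is already serving on port 8081 and LocalAGI on port 8080 (as the recipe expects), and substituting plain `go` for `$(GOCMD)`:

```bash
# Run the e2e suite against the new default text model.
# Assumes the compose stack (see the profiles below) is already up.
LOCALAGI_MODEL="gemma-3-4b-it" \
LOCALAI_API_URL="http://localhost:8081" \
LOCALAGI_API_URL="http://localhost:8080" \
go run github.com/onsi/ginkgo/v2/ginkgo --fail-fast -v -r ./...
```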
@@ -83,7 +83,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profil
 - Supports text, multimodal, and image generation models
 - Run with: `docker compose --profile nvidia up`
 - Default models:
-  - Text: `openthinker-7b`
+  - Text: `gemma-3-4b-it`
   - Multimodal: `minicpm-v-2_6`
   - Image: `flux.1-dev`
 - Environment variables:
@@ -99,7 +99,7 @@ LocalAGI supports multiple hardware configurations through Docker Compose profil
 - Supports text, multimodal, and image generation models
 - Run with: `docker compose --profile intel up`
 - Default models:
-  - Text: `openthinker-7b`
+  - Text: `gemma-3-4b-it`
   - Multimodal: `minicpm-v-2_6`
   - Image: `sd-1.5-ggml`
 - Environment variables:
@@ -130,7 +130,7 @@ docker compose --profile intel up
 ```

 If no models are specified, it will use the defaults:
-- Text model: `openthinker-7b`
+- Text model: `gemma-3-4b-it`
 - Multimodal model: `minicpm-v-2_6`
 - Image model: `flux.1-dev` (NVIDIA) or `sd-1.5-ggml` (Intel)

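These defaults come from the `${VAR:-default}` substitutions in the compose file (shown further below), so they can be overridden per run without editing any file. A minimal sketch, using the variable names from that compose file:

```bash
# Pin the models explicitly for one run (these happen to match the new defaults).
MODEL_NAME=gemma-3-4b-it IMAGE_MODEL=flux.1-dev docker compose --profile nvidia up
```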
@@ -429,96 +429,28 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		}, c...)
 	}

-	// thoughtPromptStringBuilder := strings.Builder{}
-	// thoughtPromptStringBuilder.WriteString("You have to pick an action based on the conversation and the prompt. Describe the full reasoning process for your choice. Here is a list of actions: ")
-	// for _, m := range a.availableActions() {
-	// 	thoughtPromptStringBuilder.WriteString(
-	// 		m.Definition().Name.String() + ": " + m.Definition().Description + "\n",
-	// 	)
-	// }
-
-	// thoughtPromptStringBuilder.WriteString("To not use any action, respond with 'none'")
-
-	//thoughtPromptStringBuilder.WriteString("\n\nConversation: " + Messages(c).RemoveIf(func(msg openai.ChatCompletionMessage) bool {
-	//	return msg.Role == "system"
-	//}).String())
-
-	//thoughtPrompt := thoughtPromptStringBuilder.String()
-
-	//thoughtConv := []openai.ChatCompletionMessage{}
-	thought, err := a.decision(ctx,
-		c,
-		types.Actions{action.NewReasoning()}.ToTools(),
-		action.NewReasoning().Definition().Name.String(), maxRetries)
-	if err != nil {
-		return nil, nil, "", err
-	}
-	originalReasoning := ""
-	response := &action.ReasoningResponse{}
-	if thought.actionParams != nil {
-		if err := thought.actionParams.Unmarshal(response); err != nil {
-			return nil, nil, "", err
-		}
-		originalReasoning = response.Reasoning
-	}
-	if thought.message != "" {
-		originalReasoning = thought.message
-	}
-
+	xlog.Debug("[pickAction] picking action", "messages", c)
+	thought, err := a.askLLM(ctx,
+		c,
+		maxRetries,
+	)
+	if err != nil {
+		return nil, nil, "", err
+	}
+	originalReasoning := thought.Content
+	xlog.Debug("[pickAction] original reasoning", "originalReasoning", originalReasoning)
 	// From the thought, get the action call
 	// Get all the available actions IDs
-
-	// by grammar, let's decide if we have achieved the goal
-	// 1. analyze response and check if goal is achieved
-
-	// Extract the goal first
-	params, err := a.decision(ctx,
-		append(
-			[]openai.ChatCompletionMessage{
-				{
-					Role:    "system",
-					Content: "Your only task is to extract the goal from the following conversation",
-				}}, messages...),
-		types.Actions{action.NewGoal()}.ToTools(),
-		action.NewGoal().Definition().Name.String(), maxRetries)
-	if err != nil {
-		return nil, nil, "", fmt.Errorf("failed to get the action tool parameters: %v", err)
-	}
-
-	goalResponse := action.GoalResponse{}
-	err = params.actionParams.Unmarshal(&goalResponse)
-	if err != nil {
-		return nil, nil, "", err
-	}
-
-	if goalResponse.Goal == "" {
-		xlog.Debug("[pickAction] no goal found")
-		return nil, nil, "", nil
-	}
-
-	// Check if the goal was achieved
-	params, err = a.decision(ctx,
-		[]openai.ChatCompletionMessage{
-			{
-				Role:    "system",
-				Content: "You have to understand if the goal is achieved or not from the following reasoning. The goal: " + goalResponse.Goal,
-			},
-			{
-				Role:    "user",
-				Content: originalReasoning,
-			}},
-		types.Actions{action.NewGoal()}.ToTools(),
-		action.NewGoal().Definition().Name.String(), maxRetries)
-	if err != nil {
-		return nil, nil, "", fmt.Errorf("failed to get the action tool parameters: %v", err)
-	}
-
-	err = params.actionParams.Unmarshal(&goalResponse)
-	if err != nil {
-		return nil, nil, "", err
-	}
-
-	if goalResponse.Achieved {
-		xlog.Debug("[pickAction] goal achieved", "goal", goalResponse.Goal)
-		return nil, nil, "", nil
-	}
-
-	// if the goal is not achieved, pick an action
-	xlog.Debug("[pickAction] goal not achieved", "goal", goalResponse.Goal)
-
-	xlog.Debug("[pickAction] thought", "conv", c, "originalReasoning", originalReasoning)
-	// thought, err := a.askLLM(ctx,
-	// 	c,

 	actionsID := []string{"reply"}
 	for _, m := range a.availableActions() {
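Net effect of this hunk: the dedicated `reasoning` tool call and the two goal-extraction/goal-achieved round trips are gone; the agent now gets its chain of thought from a single unconstrained completion (`askLLM`) and reads it straight from `thought.Content`. The hunk below then feeds that reasoning into the one remaining constrained call; see the sketch after it.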
@@ -533,20 +465,11 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 	// to avoid hallucinations

 	// Extract an action
-	params, err = a.decision(ctx,
-		[]openai.ChatCompletionMessage{
-			{
-				Role:    "system",
-				Content: prompt,
-			},
-			{
-				Role:    "system",
-				Content: "Extract an action to perform from the following reasoning: ",
-			},
-			{
-				Role:    "user",
-				Content: originalReasoning,
-			}},
+	params, err := a.decision(ctx,
+		append(c, openai.ChatCompletionMessage{
+			Role:    "system",
+			Content: "Pick the relevant action given the following reasoning: " + originalReasoning,
+		}),
 		types.Actions{intentionsTools}.ToTools(),
 		intentionsTools.Definition().Name.String(), maxRetries)
 	if err != nil {
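Putting the two hunks together, the new `pickAction` path is two model calls instead of up to four. A condensed sketch of the resulting flow (helper signatures such as `askLLM` and `decision` are inferred from this diff alone, not verified against the rest of the file):

```go
// 1. Free-form reasoning over the current conversation c: no tool schema,
//    so the model can think out loud.
thought, err := a.askLLM(ctx, c, maxRetries)
if err != nil {
	return nil, nil, "", err
}
originalReasoning := thought.Content

// 2. Constrained follow-up: the model must answer through the
//    intentionsTools schema, so it can only name a known action.
params, err := a.decision(ctx,
	append(c, openai.ChatCompletionMessage{
		Role:    "system",
		Content: "Pick the relevant action given the following reasoning: " + originalReasoning,
	}),
	types.Actions{intentionsTools}.ToTools(),
	intentionsTools.Definition().Name.String(), maxRetries)
```

Appending the instruction to the live conversation `c`, rather than wrapping the bare reasoning in a fresh three-message prompt, keeps the action choice grounded in the same context that produced the reasoning.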
@@ -8,7 +8,7 @@ services:
     image: localai/localai:master-ffmpeg-core
     command:
       # - gemma-3-12b-it
-      - ${MODEL_NAME:-openthinker-7b}
+      - ${MODEL_NAME:-gemma-3-4b-it}
       - granite-embedding-107m-multilingual
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
@@ -39,7 +39,7 @@ services:
           count: 1
           capabilities: [gpu]
     command:
-      - ${MODEL_NAME:-openthinker-7b}
+      - ${MODEL_NAME:-gemma-3-4b-it}
       - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
       - ${IMAGE_MODEL:-flux.1-dev}
       - granite-embedding-107m-multilingual
@@ -57,7 +57,7 @@ services:
       - /dev/dri/card1
       - /dev/dri/renderD129
     command:
-      - ${MODEL_NAME:-openthinker-7b}
+      - ${MODEL_NAME:-gemma-3-4b-it}
       - ${MULTIMODAL_MODEL:-minicpm-v-2_6}
       - ${IMAGE_MODEL:-sd-1.5-ggml}
       - granite-embedding-107m-multilingual
@@ -96,7 +96,7 @@ services:
       - 8080:3000
     #image: quay.io/mudler/localagi:master
     environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-openthinker-7b}
+      - LOCALAGI_MODEL=${MODEL_NAME:-gemma-3-4b-it}
       - LOCALAGI_LLM_API_URL=http://localai:8080
       #- LOCALAGI_LLM_API_KEY=sk-1234567890
       - LOCALAGI_LOCALRAG_URL=http://localrecall:8080
@@ -113,7 +113,7 @@ services:
     extends:
       service: localagi
     environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-openthinker-7b}
+      - LOCALAGI_MODEL=${MODEL_NAME:-gemma-3-4b-it}
       - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
       - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-flux.1-dev}
       - LOCALAGI_LLM_API_URL=http://localai:8080
@@ -127,7 +127,7 @@ services:
     extends:
       service: localagi
     environment:
-      - LOCALAGI_MODEL=${MODEL_NAME:-openthinker-7b}
+      - LOCALAGI_MODEL=${MODEL_NAME:-gemma-3-4b-it}
       - LOCALAGI_MULTIMODAL_MODEL=${MULTIMODAL_MODEL:-minicpm-v-2_6}
       - LOCALAGI_IMAGE_MODEL=${IMAGE_MODEL:-sd-1.5-ggml}
       - LOCALAGI_LLM_API_URL=http://localai:8080
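Since every model reference in the compose file goes through a `${VAR:-default}` substitution, the same overrides can also live in a `.env` file next to the compose file, which Docker Compose reads automatically. A sketch for the Intel profile:

```bash
# Keep the model selection in .env instead of the shell environment.
cat > .env <<'EOF'
MODEL_NAME=gemma-3-4b-it
MULTIMODAL_MODEL=minicpm-v-2_6
IMAGE_MODEL=sd-1.5-ggml
EOF
docker compose --profile intel up
```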