chore: cleanup, identify goal from conversation when evaluating achievement (#29)

* chore: cleanup, identify goal from conversation when evaluating achievement

Signed-off-by: mudler <mudler@localai.io>
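
A minimal sketch of the idea behind this change, assuming the github.com/sashabaranov/go-openai client that appears elsewhere in the diff; the helper name and prompt wording are illustrative only, not the code in this commit:

package goal

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

// evaluateAchievement derives the goal from the conversation itself and asks
// the model whether it was met, instead of relying on a separately tracked
// goal string. Hypothetical helper, for illustration only.
func evaluateAchievement(ctx context.Context, client *openai.Client, model string, conv []openai.ChatCompletionMessage) (string, error) {
	messages := append([]openai.ChatCompletionMessage{}, conv...)
	messages = append(messages, openai.ChatCompletionMessage{
		Role:    "system",
		Content: "Identify the user's goal from the conversation above, then state whether it was achieved and why.",
	})
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model:    model,
		Messages: messages,
	})
	if err != nil {
		return "", fmt.Errorf("evaluating achievement: %w", err)
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("no choices returned")
	}
	return resp.Choices[0].Message.Content, nil
}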

* change base cpu model

Signed-off-by: mudler <mudler@localai.io>

* this is not necessary anymore

Signed-off-by: mudler <mudler@localai.io>

* use 12b

Signed-off-by: mudler <mudler@localai.io>

* use openthinker, it's smaller

* chore(tests): set timeout

Signed-off-by: mudler <mudler@localai.io>
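
A hedged sketch of what such a test timeout can look like in Go; the duration and helper below are assumptions, not values taken from this commit:

package agent_test

import (
	"context"
	"testing"
	"time"
)

// Bound a slow, model-backed test with a deadline so CI fails fast instead of
// hanging. runAgentJob is a hypothetical stand-in for the code under test.
func TestAgentAnswers(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	done := make(chan struct{})
	go func() {
		runAgentJob(ctx)
		close(done)
	}()

	select {
	case <-done:
	case <-ctx.Done():
		t.Fatalf("test timed out: %v", ctx.Err())
	}
}

func runAgentJob(ctx context.Context) { /* placeholder for the real test body */ }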

* Enable reasoning in some of the tests

Signed-off-by: mudler <mudler@localai.io>
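
Reasoning here refers to the agent option checked in the diff below (a.options.forceReasoning). A hypothetical functional-option sketch of how a test could switch it on; the package, option, and constructor names are illustrative and not this repository's API:

package agentopts

type options struct {
	forceReasoning bool
}

// Option configures an agent; WithForceReasoning is a hypothetical option
// mirroring the forceReasoning flag referenced in the diff.
type Option func(*options)

func WithForceReasoning() Option {
	return func(o *options) { o.forceReasoning = true }
}

func newOptions(opts ...Option) *options {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}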

* docker compose unification, small changes

Signed-off-by: mudler <mudler@localai.io>

* Simplify

Signed-off-by: mudler <mudler@localai.io>

* Back to arcee-agent as default

Signed-off-by: mudler <mudler@localai.io>

* Better error handling during planning

Signed-off-by: mudler <mudler@localai.io>

* CI: do not run jobs for every branch

Signed-off-by: mudler <mudler@localai.io>

---------

Signed-off-by: mudler <mudler@localai.io>
Author: Ettore Di Giacinto
Committed by: GitHub
Date: 2025-04-12 21:01:01 +02:00
Parent: 209a9989c4
Commit: 60c249f19a
12 changed files with 267 additions and 311 deletions

@@ -249,7 +249,7 @@ func (a *Agent) runAction(ctx context.Context, chosenAction types.Action, params
 		}
 	}
-	xlog.Info("Running action", "action", chosenAction.Definition().Name, "agent", a.Character.Name)
+	xlog.Info("[runAction] Running action", "action", chosenAction.Definition().Name, "agent", a.Character.Name, "params", params.String())
 	if chosenAction.Definition().Name.Is(action.StateActionName) {
 		// We need to store the result in the state
@@ -270,6 +270,8 @@ func (a *Agent) runAction(ctx context.Context, chosenAction types.Action, params
 		}
 	}
+	xlog.Debug("[runAction] Action result", "action", chosenAction.Definition().Name, "params", params.String(), "result", result.Result)
 	return result, nil
 }
@@ -603,7 +605,13 @@ func (a *Agent) consumeJob(job *types.Job, role string) {
 		var err error
 		conv, err = a.handlePlanning(job.GetContext(), job, chosenAction, actionParams, reasoning, pickTemplate, conv)
 		if err != nil {
-			job.Result.Finish(fmt.Errorf("error running action: %w", err))
+			xlog.Error("error handling planning", "error", err)
+			//job.Result.Conversation = conv
+			//job.Result.SetResponse(msg.Content)
+			a.reply(job, role, append(conv, openai.ChatCompletionMessage{
+				Role:    "assistant",
+				Content: fmt.Sprintf("Error handling planning: %v", err),
+			}), actionParams, chosenAction, reasoning)
 			return
 		}
@@ -689,26 +697,6 @@ func (a *Agent) consumeJob(job *types.Job, role string) {
 			job.SetNextAction(&followingAction, &followingParams, reasoning)
 			a.consumeJob(job, role)
 			return
-		} else if followingAction == nil {
-			xlog.Info("Not following another action", "agent", a.Character.Name)
-			if !a.options.forceReasoning {
-				xlog.Info("Finish conversation with reasoning", "reasoning", reasoning, "agent", a.Character.Name)
-				msg := openai.ChatCompletionMessage{
-					Role:    "assistant",
-					Content: reasoning,
-				}
-				conv = append(conv, msg)
-				job.Result.SetResponse(msg.Content)
-				job.Result.Conversation = conv
-				job.Result.AddFinalizer(func(conv []openai.ChatCompletionMessage) {
-					a.saveCurrentConversation(conv)
-				})
-				job.Result.Finish(nil)
-				return
-			}
-		}
 		}
 		a.reply(job, role, conv, actionParams, chosenAction, reasoning)
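
The planning hunk in consumeJob above replaces a hard job failure with logging plus an assistant reply. A standalone sketch of that pattern; the package and helper names are hypothetical, and log/slog stands in for the repository's xlog package:

package planning

import (
	"fmt"
	"log/slog"

	openai "github.com/sashabaranov/go-openai"
)

// handlePlanningFailure logs the planning error and converts it into an
// assistant message appended to the conversation, so the exchange can
// continue instead of ending with a failed job.
func handlePlanningFailure(conv []openai.ChatCompletionMessage, err error) []openai.ChatCompletionMessage {
	slog.Error("error handling planning", "error", err)
	return append(conv, openai.ChatCompletionMessage{
		Role:    "assistant",
		Content: fmt.Sprintf("Error handling planning: %v", err),
	})
}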