finish handling support if no reasoning is used
@@ -6,6 +6,7 @@ import (
	"time"

	"github.com/mudler/local-agent-framework/action"
	"github.com/mudler/local-agent-framework/xlog"

	"github.com/sashabaranov/go-openai"
)
@@ -204,13 +205,19 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
	if err != nil {
		return nil, "", err
	}
	xlog.Debug(fmt.Sprintf("thought action Name: %v", thought.actioName))
	xlog.Debug(fmt.Sprintf("thought message: %v", thought.message))

	// Find the action
	chosenAction := a.systemInternalActions().Find(thought.actioName)
-	if chosenAction == nil {
+	if chosenAction == nil || thought.actioName == "" {
		xlog.Debug(fmt.Sprintf("no answer"))

		// LLM replied with an answer?
		//fmt.Errorf("no action found for intent:" + thought.actioName)
-		return action.NewReply(), thought.message, nil
+		return nil, thought.message, nil
	}
	xlog.Debug(fmt.Sprintf("chosenAction: %v", chosenAction.Definition().Name))
	return chosenAction, thought.message, nil
}
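With this change an empty action name is treated like an unknown one, and pickAction no longer substitutes action.NewReply(): it returns a nil action together with the model's message, leaving the caller to decide how to reply. A minimal sketch of that contract, using hypothetical simplified types rather than the framework's real Action and thought types:

package main

import "fmt"

// Action stands in for the framework's action interface (assumption).
type Action interface {
	Name() string
}

type searchAction struct{}

func (searchAction) Name() string { return "search" }

// available mimics a.systemInternalActions().Find (hypothetical lookup).
var available = map[string]Action{"search": searchAction{}}

// pickAction mirrors the new contract: an empty or unknown action name
// yields a nil Action plus the model's message, meaning "no tool to run,
// treat the message as the assistant's answer".
func pickAction(actionName, message string) (Action, string, error) {
	chosen, ok := available[actionName]
	if !ok || actionName == "" {
		return nil, message, nil
	}
	return chosen, message, nil
}

func main() {
	chosen, msg, _ := pickAction("", "Hi! Nothing to run, just answering.")
	if chosen == nil {
		fmt.Println("direct reply:", msg)
	}
}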
@@ -362,16 +362,20 @@ func (a *Agent) consumeJob(job *Job, role string) {
		var err error
		chosenAction, reasoning, err = a.pickAction(ctx, pickTemplate, a.currentConversation)
		if err != nil {
			xlog.Error("Error picking action", "error", err)
			job.Result.Finish(err)
			return
		}
	}

	//xlog.Debug("Picked action", "agent", a.Character.Name, "action", chosenAction.Definition().Name, "reasoning", reasoning)
	if chosenAction == nil {
		// If no action was picked up, the reasoning is the message returned by the assistant
		// so we can consume it as if it was a reply.
		//job.Result.SetResult(ActionState{ActionCurrentState{nil, nil, "No action to do, just reply"}, ""})
		//job.Result.Finish(fmt.Errorf("no action to do"))
		xlog.Info("No action to do, just reply", "agent", a.Character.Name, "reasoning", reasoning)

		a.currentConversation = append(a.currentConversation, openai.ChatCompletionMessage{
			Role:    "assistant",
			Content: reasoning,
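On the caller side, a nil action now means the reasoning text is the answer itself, so consumeJob records it as an assistant message. A compile-ready sketch of just that step, assuming the go-openai message type and a simplified, hypothetical stand-in for the agent's conversation state:

package main

import (
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	// Simplified, hypothetical stand-ins for the agent's state.
	conversation := []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleUser, Content: "hello"},
	}
	var chosenAction any // nil: pickAction named no action to run
	reasoning := "Hello! How can I help you today?"

	if chosenAction == nil {
		// The reasoning is consumed as if it were the assistant's reply.
		conversation = append(conversation, openai.ChatCompletionMessage{
			Role:    "assistant",
			Content: reasoning,
		})
	}

	fmt.Println(conversation[len(conversation)-1].Content)
}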
@@ -497,6 +501,18 @@ func (a *Agent) consumeJob(job *Job, role string) {
			job.Text = ""
			a.consumeJob(job, role)
			return
		} else if followingAction == nil {
+			if !a.options.forceReasoning {
+				msg := openai.ChatCompletionMessage{
+					Role:    "assistant",
+					Content: reasoning,
+				}
+
+				a.currentConversation = append(a.currentConversation, msg)
+				job.Result.SetResponse(msg.Content)
+				job.Result.Finish(nil)
+				return
+			}
		}
	}
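The block added here covers agents configured without forced reasoning: if the follow-up planning step produced no action, the reasoning text is returned to the job as the final response and the job is finished immediately. A small sketch of that early return, with JobResult as a hypothetical stand-in for the framework's actual result type:

package main

import "fmt"

// JobResult is a hypothetical stand-in for the framework's job result type.
type JobResult struct {
	response string
	err      error
	done     bool
}

func (r *JobResult) SetResponse(s string) { r.response = s }
func (r *JobResult) Finish(err error)     { r.err, r.done = err, true }

type options struct{ forceReasoning bool }

// finishIfNoReasoning mirrors the added early return: without forced
// reasoning there is no follow-up action to plan, so the reasoning text
// becomes the final answer and the job is closed.
func finishIfNoReasoning(opts options, result *JobResult, reasoning string) bool {
	if !opts.forceReasoning {
		result.SetResponse(reasoning)
		result.Finish(nil)
		return true
	}
	return false
}

func main() {
	res := &JobResult{}
	if finishIfNoReasoning(options{forceReasoning: false}, res, "done, here is the answer") {
		fmt.Println("finished early:", res.response, res.done)
	}
}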
@@ -560,6 +576,19 @@ func (a *Agent) consumeJob(job *Job, role string) {
	// ),
	// },
	// )

+	if !a.options.forceReasoning {
+		msg := openai.ChatCompletionMessage{
+			Role:    "assistant",
+			Content: replyResponse.Message,
+		}
+
+		a.currentConversation = append(a.currentConversation, msg)
+		job.Result.SetResponse(msg.Content)
+		job.Result.Finish(nil)
+		return
+	}

	resp, err := a.client.CreateChatCompletion(ctx,
		openai.ChatCompletionRequest{
			Model: a.options.LLMAPI.Model,
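The same guard appears in the reply path: when forced reasoning is off, the reply action's message is used as-is and the extra CreateChatCompletion round-trip below is skipped. A sketch of the idea, with llmSummarize standing in (hypothetically) for that second model call:

package main

import "fmt"

type replyResponse struct{ Message string }

// llmSummarize is a hypothetical placeholder for the follow-up
// chat-completion request that forced reasoning would make.
func llmSummarize(history, msg string) string {
	return "summarized: " + msg
}

// finalAnswer returns the reply directly when forced reasoning is off,
// mirroring the early return added in the diff.
func finalAnswer(forceReasoning bool, reply replyResponse, history string) string {
	if !forceReasoning {
		return reply.Message // skip the second model call
	}
	return llmSummarize(history, reply.Message)
}

func main() {
	fmt.Println(finalAnswer(false, replyResponse{Message: "direct answer"}, ""))
	fmt.Println(finalAnswer(true, replyResponse{Message: "direct answer"}, ""))
}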
@@ -6,6 +6,7 @@ import (
	"fmt"
	"net/http"
	"os"
+	"strings"

	"github.com/mudler/local-agent-framework/xlog"
@@ -246,7 +247,7 @@ func (a *App) Chat(pool *AgentPool) func(c *fiber.Ctx) error {
	agentName := c.Params("name")
	manager := pool.GetManager(agentName)

-	query := payload.Message
+	query := strings.Clone(payload.Message)
	if query == "" {
		_, _ = c.Write([]byte("Please enter a message."))
		return nil
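strings.Clone (Go 1.18+) copies the payload message into its own allocation, so the query no longer shares backing memory with the request payload; that matters if the string outlives the handler, for example once it is handed to the agent's manager. The diff does not state the motivation, so treat this as a plausible reading. A tiny standalone illustration of what Clone does:

package main

import (
	"fmt"
	"strings"
)

func main() {
	payload := strings.Repeat("x", 1<<20) + "hello" // a large incoming payload
	sub := payload[len(payload)-5:]                 // shares payload's backing array
	own := strings.Clone(sub)                       // fresh, independent allocation

	fmt.Println(sub, own) // both print "hello"; only own can outlive payload cheaply
}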