finish handling support if no reasoning is used
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/mudler/local-agent-framework/action"
+	"github.com/mudler/local-agent-framework/xlog"
 
 	"github.com/sashabaranov/go-openai"
 )
@@ -204,13 +205,19 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 	if err != nil {
 		return nil, "", err
 	}
+	xlog.Debug(fmt.Sprintf("thought action Name: %v", thought.actioName))
+	xlog.Debug(fmt.Sprintf("thought message: %v", thought.message))
+
 	// Find the action
 	chosenAction := a.systemInternalActions().Find(thought.actioName)
-	if chosenAction == nil {
+	if chosenAction == nil || thought.actioName == "" {
+		xlog.Debug(fmt.Sprintf("no answer"))
+
 		// LLM replied with an answer?
 		//fmt.Errorf("no action found for intent:" + thought.actioName)
-		return action.NewReply(), thought.message, nil
+		return nil, thought.message, nil
 	}
+	xlog.Debug(fmt.Sprintf("chosenAction: %v", chosenAction.Definition().Name))
 	return chosenAction, thought.message, nil
 }
 
@@ -362,16 +362,20 @@ func (a *Agent) consumeJob(job *Job, role string) {
 		var err error
 		chosenAction, reasoning, err = a.pickAction(ctx, pickTemplate, a.currentConversation)
 		if err != nil {
+			xlog.Error("Error picking action", "error", err)
 			job.Result.Finish(err)
 			return
 		}
 	}
 
+	//xlog.Debug("Picked action", "agent", a.Character.Name, "action", chosenAction.Definition().Name, "reasoning", reasoning)
 	if chosenAction == nil {
 		// If no action was picked up, the reasoning is the message returned by the assistant
 		// so we can consume it as if it was a reply.
 		//job.Result.SetResult(ActionState{ActionCurrentState{nil, nil, "No action to do, just reply"}, ""})
 		//job.Result.Finish(fmt.Errorf("no action to do"))\
+		xlog.Info("No action to do, just reply", "agent", a.Character.Name, "reasoning", reasoning)
+
 		a.currentConversation = append(a.currentConversation, openai.ChatCompletionMessage{
 			Role:    "assistant",
 			Content: reasoning,
@@ -497,6 +501,18 @@ func (a *Agent) consumeJob(job *Job, role string) {
 			job.Text = ""
 			a.consumeJob(job, role)
 			return
+		} else if followingAction == nil {
+			if !a.options.forceReasoning {
+				msg := openai.ChatCompletionMessage{
+					Role:    "assistant",
+					Content: reasoning,
+				}
+
+				a.currentConversation = append(a.currentConversation, msg)
+				job.Result.SetResponse(msg.Content)
+				job.Result.Finish(nil)
+				return
+			}
 		}
 	}
 
@@ -560,6 +576,19 @@ func (a *Agent) consumeJob(job *Job, role string) {
 	// ),
 	// },
 	// )
+
+	if !a.options.forceReasoning {
+		msg := openai.ChatCompletionMessage{
+			Role:    "assistant",
+			Content: replyResponse.Message,
+		}
+
+		a.currentConversation = append(a.currentConversation, msg)
+		job.Result.SetResponse(msg.Content)
+		job.Result.Finish(nil)
+		return
+	}
+
 	resp, err := a.client.CreateChatCompletion(ctx,
 		openai.ChatCompletionRequest{
 			Model: a.options.LLMAPI.Model,
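Taken together, the agent-side hunks above add a fast path for agents that run with reasoning disabled: when no action (or no follow-up action) is chosen, the assistant text itself becomes the final answer and the job is finished immediately instead of going through another reasoning round. The sketch below condenses that control flow; Agent and JobResult here are simplified stand-ins for the framework's types, not its real API.

// Sketch of the no-reasoning fast path. The types below are trimmed
// stand-ins (assumptions); only the control flow mirrors the diff.
package main

import (
	"fmt"

	"github.com/sashabaranov/go-openai"
)

// JobResult stands in for the framework's job result carrier.
type JobResult struct {
	Response string
	Err      error
}

func (r *JobResult) SetResponse(s string) { r.Response = s }
func (r *JobResult) Finish(err error)     { r.Err = err }

// Agent keeps just the two fields the fast path touches.
type Agent struct {
	forceReasoning      bool
	currentConversation []openai.ChatCompletionMessage
}

// finishWithoutReasoning mirrors the added branches: when reasoning is not
// forced, the assistant text is appended to the conversation, set as the
// job's response, and the job is finished right away.
func (a *Agent) finishWithoutReasoning(result *JobResult, text string) bool {
	if a.forceReasoning {
		return false // fall through to the usual reasoning/summary round
	}
	msg := openai.ChatCompletionMessage{Role: "assistant", Content: text}
	a.currentConversation = append(a.currentConversation, msg)
	result.SetResponse(msg.Content)
	result.Finish(nil)
	return true
}

func main() {
	a := &Agent{forceReasoning: false}
	res := &JobResult{}
	if a.finishWithoutReasoning(res, "Hello! Nothing else to do.") {
		fmt.Println("final response:", res.Response)
	}
}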
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
 	"os"
+	"strings"
 
 	"github.com/mudler/local-agent-framework/xlog"
 
@@ -246,7 +247,7 @@ func (a *App) Chat(pool *AgentPool) func(c *fiber.Ctx) error {
 		agentName := c.Params("name")
 		manager := pool.GetManager(agentName)
 
-		query := payload.Message
+		query := strings.Clone(payload.Message)
 		if query == "" {
 			_, _ = c.Write([]byte("Please enter a message."))
 			return nil
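The strings.Clone in the Chat handler is presumably a defensive copy: Fiber only guarantees that values obtained from the context (params, body, and anything derived from them) stay valid while the handler runs, so a message handed off to the agent asynchronously should be an owned copy. Below is an illustrative sketch of that pattern, assuming the Fiber v2 API already used in this file; the route and channel are made up for the example, not the project's actual wiring.

// Illustrative sketch (not the project's actual routes) of why a handler
// clones strings before handing them to a background consumer: Fiber's
// zero-allocation design reuses its buffers once the handler returns.
package main

import (
	"fmt"
	"strings"

	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()
	queries := make(chan string, 16) // stand-in for the agent's job queue

	go func() {
		for q := range queries {
			fmt.Println("processing:", q) // the agent would consume the job here
		}
	}()

	app.Post("/chat/:name", func(c *fiber.Ctx) error {
		// Values obtained from the context are only guaranteed valid while
		// this handler runs, so copy before letting them escape.
		query := strings.Clone(c.Params("name"))
		queries <- query
		return c.SendString("queued")
	})

	_ = app.Listen(":3000")
}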