feat: do not use JSON extraction for reasoning (#194)
Signed-off-by: mudler <mudler@localai.io>
parent 56b6f7240c
commit 50cad776aa
@@ -516,6 +516,7 @@ func (a *Agent) pickAction(job *types.Job, templ string, messages []openai.ChatC
         return chosenAction, thought.actionParams, thought.message, nil
     }
 
+    // Force the LLM to think and we extract a "reasoning" to pick a specific action and with which parameters
     xlog.Debug("[pickAction] forcing reasoning")
 
     prompt, err := renderTemplate(templ, a.prepareHUD(), a.availableActions(), "")
@@ -533,33 +534,35 @@ func (a *Agent) pickAction(job *types.Job, templ string, messages []openai.ChatC
         }, c...)
     }
 
-    reasoningAction := action.NewReasoning()
-    thought, err := a.decision(job,
-        c,
-        types.Actions{reasoningAction}.ToTools(),
-        reasoningAction.Definition().Name.String(), maxRetries)
-    if err != nil {
-        return nil, nil, "", err
-    }
-    if thought.actionName != "" && thought.actionName != reasoningAction.Definition().Name.String() {
-        return nil, nil, "", fmt.Errorf("expected reasoning action %s, got %s", reasoningAction.Definition().Name.String(), thought.actionName)
-    }
-
-    originalReasoning := ""
-    response := &action.ReasoningResponse{}
-    if thought.actionParams != nil {
-        if err := thought.actionParams.Unmarshal(response); err != nil {
-            return nil, nil, "", err
-        }
-        originalReasoning = response.Reasoning
-    }
-    if thought.message != "" {
-        originalReasoning = thought.message
-    }
+    // Create a detailed prompt for reasoning that includes available actions and their properties
+    reasoningPrompt := "Analyze the current situation and determine the best course of action. Consider the following:\n\n"
+    reasoningPrompt += "Available Actions:\n"
+    for _, act := range a.availableActions() {
+        reasoningPrompt += fmt.Sprintf("- %s: %s\n", act.Definition().Name, act.Definition().Description)
+        if len(act.Definition().Properties) > 0 {
+            reasoningPrompt += "  Properties:\n"
+            for name, prop := range act.Definition().Properties {
+                reasoningPrompt += fmt.Sprintf("  - %s: %s\n", name, prop.Description)
+            }
+        }
+        reasoningPrompt += "\n"
+    }
+    reasoningPrompt += "\nProvide a detailed reasoning about what action would be most appropriate in this situation and why. You can also just reply with a simple message by choosing the 'reply' or 'answer' action."
+
+    // Get reasoning using askLLM
+    reasoningMsg, err := a.askLLM(job.GetContext(),
+        append(c, openai.ChatCompletionMessage{
+            Role:    "system",
+            Content: reasoningPrompt,
+        }),
+        maxRetries)
+    if err != nil {
+        return nil, nil, "", fmt.Errorf("failed to get reasoning: %w", err)
+    }
+
+    originalReasoning := reasoningMsg.Content
 
     xlog.Debug("[pickAction] picking action", "messages", c)
-    // thought, err := a.askLLM(ctx,
-    //     c,
 
     actionsID := []string{"reply"}
     for _, m := range a.availableActions() {
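The net effect of the hunk above: instead of forcing a structured "reasoning" tool call and unmarshalling its JSON parameters into action.ReasoningResponse, the agent now renders the available actions into a plain-text system prompt and uses the model's raw reply as the reasoning. Below is a minimal, self-contained sketch of that prompt-building pattern. ActionDefinition, Property, and the sample actions are simplified stand-ins invented for illustration, not the project's real types, and strings.Builder replaces the commit's string concatenation purely as an idiomatic variation.

// Sketch: build a free-form reasoning prompt from action metadata and treat
// the LLM's raw text reply as the reasoning (no JSON schema to unmarshal).
// ActionDefinition and Property are hypothetical stand-ins for illustration.
package main

import (
    "fmt"
    "strings"
)

type Property struct {
    Description string
}

type ActionDefinition struct {
    Name        string
    Description string
    Properties  map[string]Property
}

// buildReasoningPrompt mirrors the loop added in the commit: list each action
// with its description and, where present, its parameter descriptions.
func buildReasoningPrompt(actions []ActionDefinition) string {
    var b strings.Builder
    b.WriteString("Analyze the current situation and determine the best course of action. Consider the following:\n\n")
    b.WriteString("Available Actions:\n")
    for _, act := range actions {
        fmt.Fprintf(&b, "- %s: %s\n", act.Name, act.Description)
        if len(act.Properties) > 0 {
            b.WriteString("  Properties:\n")
            for name, prop := range act.Properties {
                fmt.Fprintf(&b, "  - %s: %s\n", name, prop.Description)
            }
        }
        b.WriteString("\n")
    }
    b.WriteString("\nProvide a detailed reasoning about what action would be most appropriate in this situation and why.")
    return b.String()
}

func main() {
    actions := []ActionDefinition{
        {Name: "reply", Description: "reply to the user with a message"},
        {
            Name:        "search",
            Description: "search the web",
            Properties:  map[string]Property{"query": {Description: "the search query"}},
        },
    }
    // In the agent, the prompt built this way is appended to the conversation
    // as a system message and sent to the model; the reply's text content is
    // then used as the reasoning string directly.
    fmt.Println(buildReasoningPrompt(actions))
}

The presumable upside, in line with the commit title, is robustness: a model that emits malformed JSON can no longer derail the reasoning step, since the reasoning is consumed as plain text; structured tool selection still happens afterwards when the action itself is picked.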