mudler
2024-04-04 22:44:59 +02:00
parent 5c58072ad7
commit 744af19025
3 changed files with 73 additions and 15 deletions

View File

@@ -169,13 +169,13 @@ func (a *Agent) prepareHUD() PromptHUD {
 	}
 }
-func (a *Agent) prepareConversationParse(templ string, messages []openai.ChatCompletionMessage, canReply bool, reasoning string) ([]openai.ChatCompletionMessage, Actions, []string, error) {
+func (a *Agent) prepareConversationParse(templ string, messages []openai.ChatCompletionMessage, canReply bool, reasoning string) ([]openai.ChatCompletionMessage, Actions, error) {
 	// prepare the prompt
 	prompt := bytes.NewBuffer([]byte{})
 	promptTemplate, err := template.New("pickAction").Parse(templ)
 	if err != nil {
-		return nil, []Action{}, nil, err
+		return nil, []Action{}, err
 	}
 	actions := a.systemActions()
@@ -207,19 +207,13 @@ func (a *Agent) prepareConversationParse(templ string, messages []openai.ChatCom
 		HUD: promptHUD,
 	})
 	if err != nil {
-		return nil, []Action{}, nil, err
+		return nil, []Action{}, err
 	}
 	if a.options.debugMode {
 		fmt.Println("=== PROMPT START ===", prompt.String(), "=== PROMPT END ===")
 	}
-	// Get all the available actions IDs
-	actionsID := []string{}
-	for _, m := range actions {
-		actionsID = append(actionsID, m.Definition().Name.String())
-	}
 	conversation := []openai.ChatCompletionMessage{}
 	conversation = append(conversation, openai.ChatCompletionMessage{
@@ -227,18 +221,75 @@ func (a *Agent) prepareConversationParse(templ string, messages []openai.ChatCom
 		Content: prompt.String(),
 	})
-	return conversation, actions, actionsID, nil
+	return conversation, actions, nil
 }
 // pickAction picks an action based on the conversation
 func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.ChatCompletionMessage, canReply bool) (Action, string, error) {
-	conversation, actions, actionsID, err := a.prepareConversationParse(templ, messages, canReply, "")
+	c := messages
+	// prepare the prompt
+	prompt := bytes.NewBuffer([]byte{})
+	promptTemplate, err := template.New("pickAction").Parse(templ)
 	if err != nil {
 		return nil, "", err
 	}
+	actions := a.systemActions()
+	if !canReply {
+		actions = a.systemInternalActions()
+	}
+	// Get all the actions definitions
+	definitions := []action.ActionDefinition{}
+	for _, m := range actions {
+		definitions = append(definitions, m.Definition())
+	}
+	var promptHUD *PromptHUD
+	if a.options.enableHUD {
+		h := a.prepareHUD()
+		promptHUD = &h
+	}
+	err = promptTemplate.Execute(prompt, struct {
+		HUD *PromptHUD
+		Actions []action.ActionDefinition
+		Reasoning string
+		Messages []openai.ChatCompletionMessage
+	}{
+		Actions: definitions,
+		Messages: messages,
+		HUD: promptHUD,
+	})
+	if err != nil {
+		return nil, "", err
+	}
 	// Get the LLM to think on what to do
+	// and have a thought
+	found := false
+	for _, cc := range c {
+		if cc.Content == prompt.String() {
+			found = true
+			break
+		}
+	}
+	if !found {
+		c = append([]openai.ChatCompletionMessage{
+			{
+				Role: "system",
+				Content: prompt.String(),
+			},
+		}, c...)
+	}
+	// We also could avoid to use functions here and get just a reply from the LLM
+	// and then use the reply to get the action
 	thought, err := a.decision(ctx,
-		conversation,
+		c,
 		Actions{action.NewReasoning()}.ToTools(),
 		action.NewReasoning().Definition().Name)
 	if err != nil {
@@ -256,10 +307,15 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		reason = thought.message
 	}
-	// Decode tool call
+	// From the thought, get the action call
+	// Get all the available actions IDs
+	actionsID := []string{}
+	for _, m := range actions {
+		actionsID = append(actionsID, m.Definition().Name.String())
+	}
 	intentionsTools := action.NewIntention(actionsID...)
 	params, err := a.decision(ctx,
-		append(conversation, openai.ChatCompletionMessage{
+		append(c, openai.ChatCompletionMessage{
 			Role: "assistent",
 			Content: reason,
 		}),
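
The main behavioral change in this file: pickAction now renders the pick-action prompt itself and prepends it to the conversation as a system message only when an identical message is not already present, instead of always rebuilding the conversation through prepareConversationParse. A minimal sketch of that prepend-if-missing pattern follows; the helper name and the Message type are illustrative stand-ins for the repo's openai.ChatCompletionMessage, not code from the commit.

```go
package main

import "fmt"

// Message is a stand-in for openai.ChatCompletionMessage, used only for this sketch.
type Message struct {
	Role    string
	Content string
}

// prependSystemPromptIfMissing adds prompt as a leading system message unless an
// identical message is already present, mirroring the dedup check in pickAction.
func prependSystemPromptIfMissing(conv []Message, prompt string) []Message {
	for _, m := range conv {
		if m.Content == prompt {
			return conv // prompt already in the conversation, nothing to do
		}
	}
	return append([]Message{{Role: "system", Content: prompt}}, conv...)
}

func main() {
	conv := []Message{{Role: "user", Content: "hi"}}
	conv = prependSystemPromptIfMissing(conv, "You are an agent...")
	// Calling it again does not duplicate the system prompt.
	conv = prependSystemPromptIfMissing(conv, "You are an agent...")
	fmt.Println(len(conv)) // 2
}
```

The same check is what keeps the system prompt from being duplicated when pickAction is called repeatedly over a growing conversation.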

View File

@@ -167,7 +167,7 @@ var _ = Describe("Agent test", func() {
 		Expect(agent.State().Goal).To(ContainSubstring("guitar"), fmt.Sprint(agent.State()))
 	})
-	FIt("it automatically performs things in the background", func() {
+	It("it automatically performs things in the background", func() {
 		agent, err := New(
 			WithLLMAPIURL(apiModel),
 			WithModel(testModel),
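
The test change removes Ginkgo's focus prefix: FIt marks a spec as focused, so the runner executes only focused specs and skips the rest of the suite; switching back to It restores normal behavior. A small, self-contained sketch of the difference, assuming Ginkgo v2 and Gomega import paths (the repo's actual test setup may differ):

```go
package agent_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestAgent(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Agent test suite")
}

var _ = Describe("Focused specs", func() {
	// While an FIt is present, Ginkgo runs only focused specs and reports the
	// suite as programmatically focused; the rest of the suite is skipped.
	// FIt("runs alone while focused", func() { Expect(true).To(BeTrue()) })

	// The unfocused form runs as part of the full suite.
	It("runs with the rest of the suite", func() { Expect(true).To(BeTrue()) })
})
```

Leaving an FIt in by accident is a common way to silently skip most of a suite, which is presumably why it is reverted to It here.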

View File

@@ -53,9 +53,11 @@ You can take any of the following tools:
 To answer back to the user, use the "reply" tool.
 Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
+{{if .Messages}}
 {{range .Messages -}}
 {{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
 {{end}}
+{{end}}
 {{if .Reasoning}}Reasoning: {{.Reasoning}}{{end}}
 `
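
The template now wraps the conversation dump in {{if .Messages}} ... {{end}}, so rendering with an empty Messages slice omits the block entirely instead of emitting an empty range. A small text/template sketch of the same guard, using a simplified data struct rather than the agent's real prompt data:

```go
package main

import (
	"os"
	"text/template"
)

// msg is a simplified stand-in for openai.ChatCompletionMessage.
type msg struct {
	Role    string
	Content string
}

const tmpl = `Decide which action to take.
{{if .Messages}}
{{range .Messages -}}
{{.Role}}: {{.Content}}
{{end}}
{{end}}
{{if .Reasoning}}Reasoning: {{.Reasoning}}{{end}}
`

func main() {
	t := template.Must(template.New("pickAction").Parse(tmpl))

	// With no messages, the whole {{if .Messages}} block is skipped.
	_ = t.Execute(os.Stdout, struct {
		Messages  []msg
		Reasoning string
	}{})

	// With messages, the range prints one line per message.
	_ = t.Execute(os.Stdout, struct {
		Messages  []msg
		Reasoning string
	}{
		Messages:  []msg{{Role: "user", Content: "fix my guitar"}},
		Reasoning: "the user asked for help",
	})
}
```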