From 2ebbf1007fbc5b0655348d2f8ce5db5c85f18b1b Mon Sep 17 00:00:00 2001
From: mudler
Date: Mon, 1 Apr 2024 18:27:09 +0200
Subject: [PATCH] Allow displaying HUD/character information to the LLM

---
 agent/actions.go | 71 +++++++++++++++++++++++++++---------------------
 agent/jobs.go    | 27 ++++++++++++++++++
 agent/options.go |  6 ++++
 agent/state.go   |  7 +++++
 4 files changed, 80 insertions(+), 31 deletions(-)

diff --git a/agent/actions.go b/agent/actions.go
index 8106c69..e08c706 100644
--- a/agent/actions.go
+++ b/agent/actions.go
@@ -85,50 +85,46 @@ func (a *Agent) generateParameters(ctx context.Context, action Action, conversat
 		action.Definition().Name)
 }
 
-const pickActionTemplate = `You can take any of the following tools:
+func (a *Agent) prepareHUD() PromptHUD {
+	return PromptHUD{
+		Character: a.Character,
+	}
+}
 
-{{range .Actions -}}
-- {{.Name}}: {{.Description }}
-{{ end }}
-To answer back to the user, use the "reply" tool.
-Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
-
-{{range .Messages -}}
-{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+const hudTemplate = `You have a character and your replies and actions might be influenced by it.
+{{if .Character.Name}}Name: {{.Character.Name}}
+{{end}}{{if .Character.Age}}Age: {{.Character.Age}}
+{{end}}{{if .Character.Occupation}}Occupation: {{.Character.Occupation}}
+{{end}}{{if .Character.NowDoing}}Now doing: {{.Character.NowDoing}}
+{{end}}{{if .Character.DoingNext}}Doing next: {{.Character.DoingNext}}
+{{end}}{{if .Character.DoneHistory}}Done history: {{.Character.DoneHistory}}
+{{end}}{{if .Character.Memories}}Memories: {{.Character.Memories}}
+{{end}}{{if .Character.Hobbies}}Hobbies: {{.Character.Hobbies}}
+{{end}}{{if .Character.MusicTaste}}Music taste: {{.Character.MusicTaste}}
 {{end}}
 `
 
-const reEvalTemplate = `You can take any of the following tools:
-
-{{range .Actions -}}
-- {{.Name}}: {{.Description }}
-{{ end }}
-To answer back to the user, use the "reply" tool.
-Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
-
-{{range .Messages -}}
-{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
-{{end}}
-
-We already have called tools. Evaluate the current situation and decide if we need to execute other tools or answer back with a result.`
-
 // pickAction picks an action based on the conversation
 func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.ChatCompletionMessage) (Action, string, error) {
 	// prepare the prompt
 	prompt := bytes.NewBuffer([]byte{})
+	hud := bytes.NewBuffer([]byte{})
 
-	tmpl, err := template.New("pickAction").Parse(templ)
+	promptTemplate, err := template.New("pickAction").Parse(templ)
+	if err != nil {
+		return nil, "", err
+	}
+	hudTmpl, err := template.New("HUD").Parse(hudTemplate)
 	if err != nil {
 		return nil, "", err
 	}
-
 	// Get all the actions definitions
 	definitions := []action.ActionDefinition{action.NewReply().Definition()}
 	for _, m := range a.options.actions {
 		definitions = append(definitions, m.Definition())
 	}
 
-	err = tmpl.Execute(prompt, struct {
+	err = promptTemplate.Execute(prompt, struct {
 		Actions  []action.ActionDefinition
 		Messages []openai.ChatCompletionMessage
 	}{
@@ -139,6 +135,12 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		return nil, "", err
 	}
 
+	err = hudTmpl.Execute(hud, a.prepareHUD())
+	if err != nil {
+		return nil, "", err
+	}
+	fmt.Println("=== HUD START ===", hud.String(), "=== HUD END ===")
+
 	fmt.Println("=== PROMPT START ===", prompt.String(), "=== PROMPT END ===")
 
 	// Get all the available actions IDs
@@ -147,13 +149,20 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		actionsID = append(actionsID, m.Definition().Name.String())
 	}
 
-	conversation := []openai.ChatCompletionMessage{
-		{
-			Role:    "user",
-			Content: prompt.String(),
-		},
+	conversation := []openai.ChatCompletionMessage{}
+
+	if a.options.enableHUD {
+		conversation = append(conversation, openai.ChatCompletionMessage{
+			Role:    "system",
+			Content: hud.String(),
+		})
 	}
 
+	conversation = append(conversation, openai.ChatCompletionMessage{
+		Role:    "user",
+		Content: prompt.String(),
+	})
+
 	// Get the LLM to think on what to do
 	thought, err := a.decision(ctx,
 		conversation,
diff --git a/agent/jobs.go b/agent/jobs.go
index 7b576fe..5c3c0ce 100644
--- a/agent/jobs.go
+++ b/agent/jobs.go
@@ -70,6 +70,33 @@ func (j *JobResult) WaitResult() []string {
 	return j.Data
 }
 
+const pickActionTemplate = `You can take any of the following tools:
+
+{{range .Actions -}}
+- {{.Name}}: {{.Description }}
+{{ end }}
+To answer back to the user, use the "reply" tool.
+Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
+
+{{range .Messages -}}
+{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+{{end}}
+`
+
+const reEvalTemplate = `You can take any of the following tools:
+
+{{range .Actions -}}
+- {{.Name}}: {{.Description }}
+{{ end }}
+To answer back to the user, use the "reply" tool.
+Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
+
+{{range .Messages -}}
+{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+{{end}}
+
+We already have called tools. Evaluate the current situation and decide if we need to execute other tools or answer back with a result.`
+
 func (a *Agent) consumeJob(job *Job) {
 	// Consume the job and generate a response
 	a.Lock()
diff --git a/agent/options.go b/agent/options.go
index 84699e1..bb25539 100644
--- a/agent/options.go
+++ b/agent/options.go
@@ -18,6 +18,7 @@ type options struct {
 	randomIdentityGuidance string
 	randomIdentity         bool
 	actions                Actions
+	enableHUD              bool
 
 	context context.Context
 }
@@ -51,6 +52,11 @@ func newOptions(opts ...Option) (*options, error) {
 	return options, nil
 }
 
+var EnableHUD = func(o *options) error {
+	o.enableHUD = true
+	return nil
+}
+
 func WithLLMAPIURL(url string) Option {
 	return func(o *options) error {
 		o.LLMAPI.APIURL = url
diff --git a/agent/state.go b/agent/state.go
index 8f73935..2a2d279 100644
--- a/agent/state.go
+++ b/agent/state.go
@@ -8,6 +8,13 @@ import (
 	"github.com/mudler/local-agent-framework/llm"
 )
 
+// PromptHUD contains
+// all information that should be displayed to the LLM
+// in the prompts
+type PromptHUD struct {
+	Character Character `json:"character"`
+}
+
 type Character struct {
 	Name string `json:"name"`
 	Age  int    `json:"age"`
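
Usage note (not part of the commit): an agent opts into the HUD by passing EnableHUD among its options, which newOptions consumes to set enableHUD; pickAction then renders hudTemplate with the data returned by prepareHUD and prepends the result to the conversation as a "system" message. The sketch below renders a trimmed copy of hudTemplate with a sample Character to show roughly what that system message contains. It is illustrative only: the Character field types beyond Name and Age are not visible in this patch, so the string/slice types used here are assumptions.

// Minimal, self-contained sketch of the HUD system message introduced by this
// patch. Field types other than Name and Age are assumptions for illustration.
package main

import (
	"os"
	"text/template"
)

// Character mirrors the subset of fields referenced by hudTemplate.
type Character struct {
	Name       string
	Age        int
	Occupation string
	Hobbies    []string
}

// PromptHUD mirrors agent/state.go: the data handed to the HUD template.
type PromptHUD struct {
	Character Character
}

// hudTemplate is a trimmed copy of the template added in agent/actions.go.
const hudTemplate = `You have a character and your replies and actions might be influenced by it.
{{if .Character.Name}}Name: {{.Character.Name}}
{{end}}{{if .Character.Age}}Age: {{.Character.Age}}
{{end}}{{if .Character.Occupation}}Occupation: {{.Character.Occupation}}
{{end}}{{if .Character.Hobbies}}Hobbies: {{.Character.Hobbies}}
{{end}}`

func main() {
	tmpl := template.Must(template.New("HUD").Parse(hudTemplate))
	hud := PromptHUD{Character: Character{
		Name:       "Ada",
		Age:        32,
		Occupation: "engineer",
		Hobbies:    []string{"chess", "gardening"},
	}}
	// Render the HUD the same way pickAction does before prepending it to the
	// conversation as a "system" message (only when EnableHUD was passed).
	if err := tmpl.Execute(os.Stdout, hud); err != nil {
		panic(err)
	}
}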