Allow displaying HUD/character information to the LLM
@@ -85,50 +85,46 @@ func (a *Agent) generateParameters(ctx context.Context, action Action, conversat
 		action.Definition().Name)
 }
 
-const pickActionTemplate = `You can take any of the following tools:
-
-{{range .Actions -}}
-- {{.Name}}: {{.Description }}
-{{ end }}
-To answer back to the user, use the "reply" tool.
-Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
-
-{{range .Messages -}}
-{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+func (a *Agent) prepareHUD() PromptHUD {
+	return PromptHUD{
+		Character: a.Character,
+	}
+}
+
+const hudTemplate = `You have a character and your replies and actions might be influenced by it.
+{{if .Character.Name}}Name: {{.Character.Name}}
+{{end}}{{if .Character.Age}}Age: {{.Character.Age}}
+{{end}}{{if .Character.Occupation}}Occupation: {{.Character.Occupation}}
+{{end}}{{if .Character.NowDoing}}Now doing: {{.Character.NowDoing}}
+{{end}}{{if .Character.DoingNext}}Doing next: {{.Character.DoingNext}}
+{{end}}{{if .Character.DoneHistory}}Done history: {{.Character.DoneHistory}}
+{{end}}{{if .Character.Memories}}Memories: {{.Character.Memories}}
+{{end}}{{if .Character.Hobbies}}Hobbies: {{.Character.Hobbies}}
+{{end}}{{if .Character.MusicTaste}}Music taste: {{.Character.MusicTaste}}
 {{end}}
 `
 
-const reEvalTemplate = `You can take any of the following tools:
-
-{{range .Actions -}}
-- {{.Name}}: {{.Description }}
-{{ end }}
-To answer back to the user, use the "reply" tool.
-Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
-
-{{range .Messages -}}
-{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
-{{end}}
-
-We already have called tools. Evaluate the current situation and decide if we need to execute other tools or answer back with a result.`
-
 // pickAction picks an action based on the conversation
 func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.ChatCompletionMessage) (Action, string, error) {
 	// prepare the prompt
 	prompt := bytes.NewBuffer([]byte{})
+	hud := bytes.NewBuffer([]byte{})
 
-	tmpl, err := template.New("pickAction").Parse(templ)
+	promptTemplate, err := template.New("pickAction").Parse(templ)
+	if err != nil {
+		return nil, "", err
+	}
+	hudTmpl, err := template.New("HUD").Parse(hudTemplate)
 	if err != nil {
 		return nil, "", err
 	}
 
 	// Get all the actions definitions
 	definitions := []action.ActionDefinition{action.NewReply().Definition()}
 	for _, m := range a.options.actions {
 		definitions = append(definitions, m.Definition())
 	}
 
-	err = tmpl.Execute(prompt, struct {
+	err = promptTemplate.Execute(prompt, struct {
 		Actions  []action.ActionDefinition
 		Messages []openai.ChatCompletionMessage
 	}{
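
Note: a minimal standalone sketch of what the HUD rendering above produces, with a trimmed copy of hudTemplate and a made-up character (all values are illustrative; in the agent the data comes from a.Character via prepareHUD):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Trimmed copies of the commit's Character/PromptHUD shapes,
// limited to a few fields for illustration.
type Character struct {
	Name       string
	Age        int
	Occupation string
	NowDoing   string
}

type PromptHUD struct {
	Character Character
}

const hudTemplate = `You have a character and your replies and actions might be influenced by it.
{{if .Character.Name}}Name: {{.Character.Name}}
{{end}}{{if .Character.Age}}Age: {{.Character.Age}}
{{end}}{{if .Character.Occupation}}Occupation: {{.Character.Occupation}}
{{end}}{{if .Character.NowDoing}}Now doing: {{.Character.NowDoing}}
{{end}}`

func main() {
	hud := bytes.NewBuffer([]byte{})
	tmpl := template.Must(template.New("HUD").Parse(hudTemplate))
	// Example character; the real agent fills this from its configured Character.
	err := tmpl.Execute(hud, PromptHUD{Character: Character{
		Name:       "Ada",
		Age:        30,
		Occupation: "researcher",
		NowDoing:   "reading the conversation",
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(hud.String())
}

The rendered lines ("Name: Ada", "Age: 30", ...) are what gets injected into the conversation as a system message when the HUD is enabled.
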
@@ -139,6 +135,12 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		return nil, "", err
 	}
 
+	err = hudTmpl.Execute(prompt, a.prepareHUD())
+	if err != nil {
+		return nil, "", err
+	}
+	fmt.Println("=== HUD START ===", hud.String(), "=== HUD END ===")
+
 	fmt.Println("=== PROMPT START ===", prompt.String(), "=== PROMPT END ===")
 
 	// Get all the available actions IDs
@@ -147,12 +149,19 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		actionsID = append(actionsID, m.Definition().Name.String())
 	}
 
-	conversation := []openai.ChatCompletionMessage{
-		{
-			Role:    "user",
-			Content: prompt.String(),
-		},
-	}
+	conversation := []openai.ChatCompletionMessage{}
+
+	if a.options.enableHUD {
+		conversation = append(conversation, openai.ChatCompletionMessage{
+			Role:    "system",
+			Content: hud.String(),
+		})
+	}
+
+	conversation = append(conversation, openai.ChatCompletionMessage{
+		Role:    "user",
+		Content: prompt.String(),
+	})
 
 	// Get the LLM to think on what to do
 	thought, err := a.decision(ctx,
@@ -70,6 +70,33 @@ func (j *JobResult) WaitResult() []string {
 	return j.Data
 }
 
+const pickActionTemplate = `You can take any of the following tools:
+
+{{range .Actions -}}
+- {{.Name}}: {{.Description }}
+{{ end }}
+To answer back to the user, use the "reply" tool.
+Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
+
+{{range .Messages -}}
+{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+{{end}}
+`
+
+const reEvalTemplate = `You can take any of the following tools:
+
+{{range .Actions -}}
+- {{.Name}}: {{.Description }}
+{{ end }}
+To answer back to the user, use the "reply" tool.
+Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.
+
+{{range .Messages -}}
+{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
+{{end}}
+
+We already have called tools. Evaluate the current situation and decide if we need to execute other tools or answer back with a result.`
+
 func (a *Agent) consumeJob(job *Job) {
 	// Consume the job and generate a response
 	a.Lock()
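
Note: the moved templates can be exercised in isolation; a sketch using simplified stand-ins for action.ActionDefinition and openai.ChatCompletionMessage (only the field names the template touches are kept, and the sample action/message values are made up):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Simplified stand-ins for the real types; the template only
// looks up field names, so these are enough for a dry run.
type ActionDef struct {
	Name        string
	Description string
}

type ToolCall struct {
	Name      string
	Arguments string
}

type Message struct {
	Role         string
	Content      string
	FunctionCall interface{}
	ToolCalls    []ToolCall
}

const pickActionTemplate = `You can take any of the following tools:

{{range .Actions -}}
- {{.Name}}: {{.Description }}
{{ end }}
To answer back to the user, use the "reply" tool.
Given the text below, decide which action to take and explain the detailed reasoning behind it. For answering without picking a choice, reply with 'none'.

{{range .Messages -}}
{{.Role}}{{if .FunctionCall}}(tool_call){{.FunctionCall}}{{end}}: {{if .FunctionCall}}{{.FunctionCall}}{{else if .ToolCalls -}}{{range .ToolCalls -}}{{.Name}} called with {{.Arguments}}{{end}}{{ else }}{{.Content -}}{{end}}
{{end}}
`

func main() {
	prompt := bytes.NewBuffer([]byte{})
	tmpl := template.Must(template.New("pickAction").Parse(pickActionTemplate))
	err := tmpl.Execute(prompt, struct {
		Actions  []ActionDef
		Messages []Message
	}{
		Actions:  []ActionDef{{Name: "reply", Description: "answer back to the user"}},
		Messages: []Message{{Role: "user", Content: "what's the weather like?"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(prompt.String())
}

Printing the buffer shows the exact action-picking prompt the LLM receives for that conversation.
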
@@ -18,6 +18,7 @@ type options struct {
 	randomIdentityGuidance string
 	randomIdentity         bool
 	actions                Actions
+	enableHUD              bool
 	context                context.Context
 }
 
@@ -51,6 +52,11 @@ func newOptions(opts ...Option) (*options, error) {
 	return options, nil
 }
 
+var EnableHUD = func(o *options) error {
+	o.enableHUD = true
+	return nil
+}
+
 func WithLLMAPIURL(url string) Option {
 	return func(o *options) error {
 		o.LLMAPI.APIURL = url
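
Note: a minimal sketch of wiring the new option, assuming a _test.go file (with the usual "testing" import) in the same package, since newOptions and the options struct are unexported; the API URL is just an example value:

func TestEnableHUD(t *testing.T) {
	o, err := newOptions(
		EnableHUD,                              // turn on the HUD system message
		WithLLMAPIURL("http://localhost:8080"), // example endpoint
	)
	if err != nil {
		t.Fatal(err)
	}
	if !o.enableHUD {
		t.Fatalf("expected enableHUD to be set")
	}
}
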
@@ -8,6 +8,13 @@ import (
 	"github.com/mudler/local-agent-framework/llm"
 )
 
+// PromptHUD contains
+// all information that should be displayed to the LLM
+// in the prompts
+type PromptHUD struct {
+	Character Character `json:"character"`
+}
+
 type Character struct {
 	Name string `json:"name"`
 	Age  int    `json:"age"`
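
Note: given the json tags, the HUD state round-trips through JSON; a small sketch limited to the fields visible in this hunk (the real Character also carries occupation, memories, hobbies, etc., as referenced by hudTemplate):

package main

import (
	"encoding/json"
	"fmt"
)

// Only the fields shown in this hunk.
type Character struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

type PromptHUD struct {
	Character Character `json:"character"`
}

func main() {
	raw := []byte(`{"character":{"name":"Ada","age":30}}`)
	var hud PromptHUD
	if err := json.Unmarshal(raw, &hud); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", hud) // {Character:{Name:Ada Age:30}}
}
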