re-eval
@@ -11,6 +11,10 @@ func NewIntention(s ...string) *IntentAction {
 type IntentAction struct {
 	tools []string
 }
+type IntentResponse struct {
+	Tool      string `json:"tool"`
+	Reasoning string `json:"reasoning"`
+}
 
 func (a *IntentAction) Run(ActionParams) (string, error) {
 	return "no-op", nil
@@ -39,11 +39,16 @@ func (a Actions) Find(name string) Action {
 	return nil
 }
 
+type decisionResult struct {
+	actionParams action.ActionParams
+	message      string
+}
+
 // decision forces the agent to take on of the available actions
 func (a *Agent) decision(
 	ctx context.Context,
 	conversation []openai.ChatCompletionMessage,
-	tools []openai.Tool, toolchoice any) (action.ActionParams, error) {
+	tools []openai.Tool, toolchoice any) (*decisionResult, error) {
 
 	decision := openai.ChatCompletionRequest{
 		Model: a.options.LLMAPI.Model,
@@ -61,7 +66,7 @@ func (a *Agent) decision(
 	msg := resp.Choices[0].Message
 	if len(msg.ToolCalls) != 1 {
 		fmt.Println(msg)
-		return nil, fmt.Errorf("len(toolcalls): %v", len(msg.ToolCalls))
+		return &decisionResult{message: msg.Content}, nil
 	}
 
 	params := action.ActionParams{}
@@ -70,10 +75,10 @@ func (a *Agent) decision(
 		return nil, err
 	}
 
-	return params, nil
+	return &decisionResult{actionParams: params}, nil
 }
 
-func (a *Agent) generateParameters(ctx context.Context, action Action, conversation []openai.ChatCompletionMessage) (action.ActionParams, error) {
+func (a *Agent) generateParameters(ctx context.Context, action Action, conversation []openai.ChatCompletionMessage) (*decisionResult, error) {
 	return a.decision(ctx,
 		conversation,
 		a.options.actions.ToTools(),
@@ -121,11 +126,7 @@ We already have called tools. Evaluate the current situation and decide if we ne
 // pickAction picks an action based on the conversation
 func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.ChatCompletionMessage) (Action, string, error) {
-	actionChoice := struct {
-		Intent    string `json:"tool"`
-		Reasoning string `json:"reasoning"`
-	}{}
 
 	// prepare the prompt
 	prompt := bytes.NewBuffer([]byte{})
 
 	tmpl, err := template.New("pickAction").Parse(templ)
@@ -133,7 +134,7 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		return nil, "", err
 	}
-
+	// It can pick the reply action too
 	// Get all the actions definitions
 	definitions := []action.ActionDefinition{action.NewReply().Definition()}
 	for _, m := range a.options.actions {
 		definitions = append(definitions, m.Definition())
@@ -152,11 +153,11 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 	fmt.Println("=== PROMPT START ===", prompt.String(), "=== PROMPT END ===")
 
 	// Get all the available actions IDs
 	actionsID := []string{}
 	for _, m := range a.options.actions {
 		actionsID = append(actionsID, m.Definition().Name.String())
 	}
-	intentionsTools := action.NewIntention(actionsID...)
 
 	conversation := []openai.ChatCompletionMessage{
 		{
@@ -165,8 +166,36 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		},
 	}
 
-	params, err := a.decision(ctx,
+	// Get the LLM to think on what to do
+	thought, err := a.decision(ctx,
 		conversation,
+		Actions{action.NewReasoning()}.ToTools(),
+		action.NewReasoning().Definition().Name)
+	if err != nil {
+		fmt.Println("failed thinking", err)
+		return nil, "", err
+	}
+	reason := ""
+	response := &action.ReasoningResponse{}
+	if thought.actionParams != nil {
+		if err := thought.actionParams.Unmarshal(response); err != nil {
+			return nil, "", err
+		}
+		reason = response.Reasoning
+	}
+	if thought.message != "" {
+		reason = thought.message
+	}
+
+	fmt.Println(reason)
+
+	// Decode tool call
+	intentionsTools := action.NewIntention(actionsID...)
+	params, err := a.decision(ctx,
+		append(conversation, openai.ChatCompletionMessage{
+			Role:    "assistent",
+			Content: reason,
+		}),
 		Actions{intentionsTools}.ToTools(),
 		intentionsTools.Definition().Name)
 	if err != nil {
@@ -174,22 +203,28 @@ func (a *Agent) pickAction(ctx context.Context, templ string, messages []openai.
 		return nil, "", err
 	}
 
-	err = params.Unmarshal(&actionChoice)
+	actionChoice := action.IntentResponse{}
+
+	if params.actionParams == nil {
+		return nil, params.message, nil
+	}
+
+	err = params.actionParams.Unmarshal(&actionChoice)
 	if err != nil {
 		return nil, "", err
 	}
 
 	fmt.Printf("Action choice: %v\n", actionChoice)
 
-	if actionChoice.Intent == "" || actionChoice.Intent == "none" {
+	if actionChoice.Tool == "" || actionChoice.Tool == "none" {
 		return nil, "", fmt.Errorf("no intent detected")
 	}
 
 	// Find the action
-	chosenAction := append(a.options.actions, action.NewReply()).Find(actionChoice.Intent)
+	chosenAction := append(a.options.actions, action.NewReply()).Find(actionChoice.Tool)
 	if chosenAction == nil {
-		fmt.Println("No action found for intent: ", actionChoice.Intent)
-		return nil, "", fmt.Errorf("No action found for intent:" + actionChoice.Intent)
+		fmt.Println("No action found for intent: ", actionChoice.Tool)
+		return nil, "", fmt.Errorf("No action found for intent:" + actionChoice.Tool)
 	}
 
 	return chosenAction, actionChoice.Reasoning, nil
@@ -103,7 +103,7 @@ func (a *Agent) consumeJob(job *Job) {
 		return
 	}
 
-	if chosenAction.Definition().Name.Is(action.ReplyActionName) {
+	if chosenAction == nil || chosenAction.Definition().Name.Is(action.ReplyActionName) {
 		fmt.Println("No action to do, just reply")
 		job.Result.SetResult(reasoning)
 		job.Result.Finish()
@@ -116,12 +116,17 @@ func (a *Agent) consumeJob(job *Job) {
 		return
 	}
 
+	if params.actionParams == nil {
+		fmt.Println("no parameters")
+		return
+	}
+
 	var result string
 	for _, action := range a.options.actions {
 		fmt.Println("Checking action: ", action.Definition().Name, chosenAction.Definition().Name)
 		if action.Definition().Name == chosenAction.Definition().Name {
 			fmt.Printf("Running action: %v\n", action.Definition().Name)
-			if result, err = action.Run(params); err != nil {
+			if result, err = action.Run(params.actionParams); err != nil {
 				fmt.Printf("error running action: %v\n", err)
 				return
 			}
@@ -134,7 +139,7 @@ func (a *Agent) consumeJob(job *Job) {
 		Role: "assistant",
 		FunctionCall: &openai.FunctionCall{
 			Name:      chosenAction.Definition().Name.String(),
-			Arguments: params.String(),
+			Arguments: params.actionParams.String(),
 		},
 	})
 
@@ -153,7 +158,10 @@ func (a *Agent) consumeJob(job *Job) {
 		fmt.Printf("error picking action: %v\n", err)
 		return
 	}
-	if !chosenAction.Definition().Name.Is(action.ReplyActionName) {
+
+	if followingAction == nil || followingAction.Definition().Name.Is(action.ReplyActionName) {
+		fmt.Println("No action to do, just reply")
+	} else if !chosenAction.Definition().Name.Is(action.ReplyActionName) {
 		// We need to do another action (?)
 		// The agent decided to do another action
 		fmt.Println("Another action to do: ", followingAction.Definition().Name)
@@ -161,11 +169,11 @@ func (a *Agent) consumeJob(job *Job) {
 		return
 	}
 
 	// Generate a human-readable response
 	resp, err := a.client.CreateChatCompletion(ctx,
 		openai.ChatCompletionRequest{
 			Model:    a.options.LLMAPI.Model,
 			Messages: messages,
 			// Tools: tools,
 		},
 	)
 	if err != nil || len(resp.Choices) != 1 {
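
For reference, a minimal sketch (not part of the commit) of the calling pattern the new decisionResult type enables: decision now returns either decoded tool-call parameters or the model's plain-text message, and callers branch on actionParams before unmarshalling. Only decisionResult, action.IntentResponse, Unmarshal, and the a.decision call are taken from the diff above; variable names and the surrounding context are illustrative and assume code living in the same package.

	// Illustrative only: consuming a *decisionResult as pickAction does above.
	res, err := a.decision(ctx, conversation,
		Actions{intentionsTools}.ToTools(),
		intentionsTools.Definition().Name)
	if err != nil {
		return nil, "", err
	}
	if res.actionParams == nil {
		// The LLM replied with plain text instead of a tool call,
		// so the message itself is the result.
		return nil, res.message, nil
	}
	// Otherwise decode the tool call into the typed response.
	choice := action.IntentResponse{}
	if err := res.actionParams.Unmarshal(&choice); err != nil {
		return nil, "", err
	}
	// choice.Tool names the selected action; choice.Reasoning explains the pick.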