Add long-term memory
@@ -68,6 +68,7 @@ func (a *Agent) decision(
 		Tools:      tools,
+		ToolChoice: toolchoice,
 	}
 
 	resp, err := a.client.CreateChatCompletion(ctx, decision)
 	if err != nil {
 		return nil, err
@@ -96,6 +97,14 @@ func (m Messages) ToOpenAI() []openai.ChatCompletionMessage {
 	return []openai.ChatCompletionMessage(m)
 }
 
+func (m Messages) String() string {
+	s := ""
+	for _, cc := range m {
+		s += cc.Role + ": " + cc.Content + "\n"
+	}
+	return s
+}
+
 func (m Messages) Exist(content string) bool {
 	for _, cc := range m {
 		if cc.Content == content {
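The new String helper is what later feeds the summarization prompt, and Exist checks for duplicate content. A quick sketch of the expected behavior, assuming Messages aliases []openai.ChatCompletionMessage as ToOpenAI suggests (the sample values here are illustrative, not from the commit):

	conv := Messages{
		{Role: "user", Content: "Remind me to water the plants"},
		{Role: "assistant", Content: "Noted!"},
	}

	// String renders one "role: content" line per message:
	//   user: Remind me to water the plants
	//   assistant: Noted!
	fmt.Print(conv.String())

	// Exist matches on exact content only.
	_ = conv.Exist("Noted!") // true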
@@ -151,14 +151,50 @@ func (a *Agent) SetConversation(conv []openai.ChatCompletionMessage) {
 	a.currentConversation = conv
 }
 
+func (a *Agent) askLLM(ctx context.Context, conversation []openai.ChatCompletionMessage) (openai.ChatCompletionMessage, error) {
+	resp, err := a.client.CreateChatCompletion(ctx,
+		openai.ChatCompletionRequest{
+			Model:    a.options.LLMAPI.Model,
+			Messages: conversation,
+		},
+	)
+	if err != nil {
+		return openai.ChatCompletionMessage{}, err
+	}
+
+	if len(resp.Choices) != 1 {
+		return openai.ChatCompletionMessage{}, fmt.Errorf("not enough choices: %d", len(resp.Choices))
+	}
+
+	return resp.Choices[0].Message, nil
+}
+
 func (a *Agent) ResetConversation() {
 	a.Lock()
 	defer a.Unlock()
 
 	xlog.Info("Resetting conversation", "agent", a.Character.Name)
 
 	// store into memory the conversation before pruning it
 	// TODO: Shall we summarize the conversation into a bullet list of highlights
 	// using the LLM instead?
-	if a.options.enableKB {
+	if a.options.enableLongTermMemory {
 		xlog.Info("Saving conversation", "agent", a.Character.Name, "conversation size", len(a.currentConversation))
 
+		if a.options.enableSummaryMemory {
+			msg, err := a.askLLM(a.context.Context, []openai.ChatCompletionMessage{{
+				Role:    "user",
+				Content: "Summarize the conversation below, keep the highlights as a bullet list:\n" + Messages(a.currentConversation).String(),
+			}})
+			if err != nil {
+				xlog.Error("Error summarizing conversation", "error", err)
+			}
+
+			if err := a.options.ragdb.Store(msg.Content); err != nil {
+				xlog.Error("Error storing into memory", "error", err)
+			}
+		} else {
+			for _, message := range a.currentConversation {
+				if message.Role == "user" {
+					if err := a.options.ragdb.Store(message.Content); err != nil {
@@ -168,6 +204,8 @@ func (a *Agent) ResetConversation() {
 				}
 			}
+		}
+
 	}
 
 	a.currentConversation = []openai.ChatCompletionMessage{}
 }
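The only part of the RAGDB contract exercised here is Store. A minimal in-memory stand-in for local testing might look like the sketch below; the type name is hypothetical, and the real interface (defined elsewhere in the repo) may well require further methods, e.g. for retrieval:

	// Hypothetical stand-in for the ragdb dependency used by
	// ResetConversation above; only Store appears in this diff.
	type inMemoryRAGDB struct {
		entries []string
	}

	// Store appends the text verbatim. A real backend would embed and
	// persist it so it can be recalled in later conversations.
	func (db *inMemoryRAGDB) Store(content string) error {
		db.entries = append(db.entries, content)
		return nil
	}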
@@ -612,34 +650,15 @@ func (a *Agent) consumeJob(job *Job, role string) {
 		return
 	}
 
-	resp, err := a.client.CreateChatCompletion(ctx,
-		openai.ChatCompletionRequest{
-			Model: a.options.LLMAPI.Model,
-			// Force the AI to respond without using any tool
-			// Why: some models might be silly enough to attempt to call tools even if they evaluated
-			// that a reply was not necessary anymore
-			Messages: append(a.currentConversation, openai.ChatCompletionMessage{
+	msg, err := a.askLLM(ctx, append(a.currentConversation, openai.ChatCompletionMessage{
 		Role:    "system",
 		Content: "The assistant needs to reply without using any tool.",
-		// + replyResponse.Message,
-	},
-		),
-		//Messages: a.currentConversation,
-		},
-	)
+	}))
 	if err != nil {
 		job.Result.Finish(err)
 		return
 	}
 
-	if len(resp.Choices) != 1 {
-		job.Result.Finish(fmt.Errorf("not enough choices: %w", err))
-		return
-	}
-
-	// display OpenAI's response to the original question utilizing our function
-	msg := resp.Choices[0].Message
-
 	// If we didn't get any message, we can use the response from the action
 	if chosenAction.Definition().Name.Is(action.ReplyActionName) && msg.Content == "" ||
 		strings.Contains(msg.Content, "<tool_call>") {
@@ -661,9 +680,6 @@ func (a *Agent) periodicallyRun(timer *time.Timer) {
 	// Remember to always reset the timer - if we don't, the agent will stop
 	defer timer.Reset(a.options.periodicRuns)
 
-	if !a.options.standaloneJob {
-		return
-	}
 	a.StopAction()
 
 	xlog.Debug("Agent is running periodically", "agent", a.Character.Name)
@@ -675,18 +691,24 @@ func (a *Agent) periodicallyRun(timer *time.Timer) {
 
 	xlog.Info("START -- Periodically run is starting")
 
-	if len(a.CurrentConversation()) != 0 {
-		// Here the LLM could decide to store some part of the conversation too in the memory
-		evaluateMemory := NewJob(
-			WithText(
-				`Evaluate the current conversation and decide if we need to store some relevant information from it`,
-			),
-			WithReasoningCallback(a.options.reasoningCallback),
-			WithResultCallback(a.options.resultCallback),
-		)
-		a.consumeJob(evaluateMemory, SystemRole)
-		a.ResetConversation()
-	}
+	// if len(a.CurrentConversation()) != 0 {
+	// 	// Here the LLM could decide to store some part of the conversation too in the memory
+	// 	evaluateMemory := NewJob(
+	// 		WithText(
+	// 			`Evaluate the current conversation and decide if we need to store some relevant information from it`,
+	// 		),
+	// 		WithReasoningCallback(a.options.reasoningCallback),
+	// 		WithResultCallback(a.options.resultCallback),
+	// 	)
+	// 	a.consumeJob(evaluateMemory, SystemRole)
+
+	// 	a.ResetConversation()
+	// }
+
+	if !a.options.standaloneJob {
+		a.ResetConversation()
+
+		return
+	}
 
 	// Here we go in a loop of
@@ -19,7 +19,7 @@ type options struct {
 	randomIdentityGuidance string
 	randomIdentity         bool
 	userActions            Actions
-	enableHUD, standaloneJob, showCharacter, enableKB bool
+	enableHUD, standaloneJob, showCharacter, enableKB, enableSummaryMemory, enableLongTermMemory bool
 
 	canStopItself         bool
 	initiateConversations bool
@@ -127,6 +127,16 @@ var EnablePersonality = func(o *options) error {
 	return nil
 }
 
+var EnableSummaryMemory = func(o *options) error {
+	o.enableSummaryMemory = true
+	return nil
+}
+
+var EnableLongTermMemory = func(o *options) error {
+	o.enableLongTermMemory = true
+	return nil
+}
+
 func WithRAGDB(db RAGDB) Option {
 	return func(o *options) error {
 		o.ragdb = db
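Wiring the new flags follows the existing functional-options pattern. A sketch of typical usage, where the New constructor and the db value are assumptions rather than part of this diff; note that, per ResetConversation above, EnableSummaryMemory only takes effect when EnableLongTermMemory is set too:

	// Sketch: enabling the new memory behavior when building an agent.
	// New and db are assumed names; the options are from this commit.
	agent, err := New(
		WithRAGDB(db),        // backing store for saved conversations
		EnableLongTermMemory, // persist the conversation on reset
		EnableSummaryMemory,  // store an LLM summary instead of raw user messages
	)
	if err != nil {
		log.Fatal(err)
	}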
@@ -28,4 +28,6 @@ type AgentConfig struct {
 	KnowledgeBaseResults int    `json:"kb_results" form:"kb_results"`
 	CanStopItself        bool   `json:"can_stop_itself" form:"can_stop_itself"`
 	SystemPrompt         string `json:"system_prompt" form:"system_prompt"`
+	LongTermMemory        bool  `json:"long_term_memory" form:"long_term_memory"`
+	SummaryLongTermMemory bool  `json:"summary_long_term_memory" form:"summary_long_term_memory"`
 }
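Because the fields carry both json and form tags, the new flags can come from a stored agent definition as well as the web form. A small sketch of the JSON side, using the standard encoding/json package (the values are illustrative):

	raw := []byte(`{"long_term_memory": true, "summary_long_term_memory": true}`)

	var cfg AgentConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	// Both booleans are now set, so startAgentWithConfig below will
	// append the matching options.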
@@ -248,6 +248,15 @@ func (a *AgentPool) startAgentWithConfig(name string, config *AgentConfig) error
 	if config.StandaloneJob {
 		opts = append(opts, EnableStandaloneJob)
 	}
 
+	if config.LongTermMemory {
+		opts = append(opts, EnableLongTermMemory)
+	}
+
+	if config.SummaryLongTermMemory {
+		opts = append(opts, EnableSummaryMemory)
+	}
+
 	if config.CanStopItself {
 		opts = append(opts, CanStopItself)
 	}
@@ -104,6 +104,11 @@
 
 <label for="random_identity" class="block text-lg font-medium text-gray-400">Random Identity</label>
 <input type="checkbox" name="random_identity" id="random_identity" class="mt-1 focus:ring-indigo-500 focus:border-indigo-500 block w-full shadow-sm sm:text-lg border-gray-300 rounded-md bg-gray-700 text-white">
 
+<label for="long_term_memory" class="block text-lg font-medium text-gray-400">Long-term memory</label>
+<input type="checkbox" name="long_term_memory" id="long_term_memory" class="mt-1 focus:ring-indigo-500 focus:border-indigo-500 block w-full shadow-sm sm:text-lg border-gray-300 rounded-md bg-gray-700 text-white">
+<label for="summary_long_term_memory" class="block text-lg font-medium text-gray-400">Long-term memory (summarized)</label>
+<input type="checkbox" name="summary_long_term_memory" id="summary_long_term_memory" class="mt-1 focus:ring-indigo-500 focus:border-indigo-500 block w-full shadow-sm sm:text-lg border-gray-300 rounded-md bg-gray-700 text-white">
 </div>
 <div class="mb-4">
 <label for="identity_guidance" class="block text-lg font-medium text-gray-400">Identity Guidance</label>