@@ -281,7 +281,6 @@ func (a *Agent) consumeJob(job *Job, role string) {
}

func (a *Agent) periodicallyRun() {

	// Here the LLM could decide to store some part of the conversation too in the memory
	evaluateMemory := NewJob(
		WithText(
@@ -319,12 +318,6 @@ func (a *Agent) periodicallyRun() {
	a.consumeJob(evaluateAction, SystemRole)

	a.ResetConversation()

	// TODO: decide to do something on its own with the conversation result
	// before clearing it out

	// Clear the conversation
	// a.currentConversation = []openai.ChatCompletionMessage{}
}

func (a *Agent) Run() error {
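For context, the flow that `periodicallyRun` follows in this diff is: build a self-evaluation job, feed it back through `consumeJob` under the system role, and then reset the conversation. The snippet below is a minimal, self-contained sketch of that loop; the `Job`, `JobOption`, and `Agent` definitions and the ticker-driven scheduler are simplified stand-ins written for illustration, not this repository's actual implementation.

```go
package main

import (
	"fmt"
	"time"
)

// Minimal stand-ins for the types used in the diff; the real project's
// Job/Agent definitions are not shown here, so these are assumptions.
type Job struct{ Text string }

type JobOption func(*Job)

// WithText mirrors the functional-option style seen in the diff: NewJob(WithText(...)).
func WithText(text string) JobOption {
	return func(j *Job) { j.Text = text }
}

func NewJob(opts ...JobOption) *Job {
	j := &Job{}
	for _, o := range opts {
		o(j)
	}
	return j
}

const SystemRole = "system"

type Agent struct {
	currentConversation []string // stand-in for []openai.ChatCompletionMessage
}

func (a *Agent) consumeJob(job *Job, role string) {
	// The real agent would run the job through the LLM; here we only log it.
	fmt.Printf("[%s] consuming job: %s\n", role, job.Text)
}

func (a *Agent) ResetConversation() {
	a.currentConversation = nil
}

// periodicallyRun mirrors the flow visible in the diff: evaluate memory,
// evaluate the next action, then reset the conversation.
func (a *Agent) periodicallyRun() {
	evaluateMemory := NewJob(
		WithText("store relevant parts of the conversation in memory"),
	)
	a.consumeJob(evaluateMemory, SystemRole)

	evaluateAction := NewJob(
		WithText("decide whether to act on the conversation result"),
	)
	a.consumeJob(evaluateAction, SystemRole)

	a.ResetConversation()
}

func main() {
	a := &Agent{}
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 2; i++ {
		<-ticker.C
		a.periodicallyRun()
	}
}
```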
@@ -23,7 +23,7 @@ type PromptHUD struct
 // And a context memory (that is always powered by a vector database),
 // this memory is the shorter one that the LLM keeps across conversation and across its
 // reasoning process's and life time.
-// A special action is then used to let the LLM itself update its memory
+// TODO: A special action is then used to let the LLM itself update its memory
 // periodically during self-processing, and the same action is ALSO exposed
 // during the conversation to let the user put for example, a new goal to the agent.
 type State struct {
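The comments above describe a shorter-lived context memory backed by a vector database, plus a dedicated action the LLM can call to update that memory, both from its periodic self-processing and from a user conversation (for example to record a new goal). Below is a small, self-contained sketch of what such a shared action could look like; the `Action` interface, the `update_memory` name, and the in-memory store are assumptions made for illustration, not this repository's API.

```go
package main

import (
	"context"
	"fmt"
)

// Action is a hypothetical interface for tools the LLM can invoke.
type Action interface {
	Name() string
	Run(ctx context.Context, params map[string]string) (string, error)
}

// MemoryStore stands in for the vector database mentioned in the comments.
type MemoryStore interface {
	Store(ctx context.Context, text string) error
}

// inMemoryStore is a trivial backend used only for this sketch.
type inMemoryStore struct{ entries []string }

func (s *inMemoryStore) Store(_ context.Context, text string) error {
	s.entries = append(s.entries, text)
	return nil
}

// updateMemoryAction is the "special action" idea: the same action can be
// offered to the LLM during self-processing and during a conversation.
type updateMemoryAction struct{ store MemoryStore }

func (a *updateMemoryAction) Name() string { return "update_memory" }

func (a *updateMemoryAction) Run(ctx context.Context, params map[string]string) (string, error) {
	text, ok := params["content"]
	if !ok || text == "" {
		return "", fmt.Errorf("update_memory: missing 'content' parameter")
	}
	if err := a.store.Store(ctx, text); err != nil {
		return "", err
	}
	return "memory updated", nil
}

func main() {
	store := &inMemoryStore{}
	var action Action = &updateMemoryAction{store: store}

	// The same action invoked from two places: self-processing and a conversation.
	_, _ = action.Run(context.Background(), map[string]string{"content": "summary of last conversation"})
	_, _ = action.Run(context.Background(), map[string]string{"content": "new goal set by the user"})

	fmt.Println(store.entries)
}
```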