Enable critic, and give more context while running subtasks

This commit is contained in:
mudler
2023-08-20 11:59:37 +02:00
parent 4c1c601417
commit b9b083f059
2 changed files with 49 additions and 21 deletions

main.py (18 lines changed) — View File

@@ -93,6 +93,9 @@ parser.add_argument('--force-action', dest='force_action', action='store', defau
# Debug mode # Debug mode
parser.add_argument('--debug', dest='debug', action='store_true', default=False, parser.add_argument('--debug', dest='debug', action='store_true', default=False,
help='Debug mode') help='Debug mode')
# Critic mode
parser.add_argument('--critic', dest='critic', action='store_true', default=False,
help='Enable critic')
# Parse arguments # Parse arguments
args = parser.parse_args() args = parser.parse_args()
@@ -172,7 +175,6 @@ def ask_user_confirmation(action_name, action_parameters):
logger.info("==> Skipping action") logger.info("==> Skipping action")
return False return False
### Agent capabilities ### Agent capabilities
### These functions are called by the agent to perform actions ### These functions are called by the agent to perform actions
### ###
@@ -198,7 +200,7 @@ def search_memory(query, agent_actions={}, localagi=None):
# write file to disk with content # write file to disk with content
def write_file(arg, agent_actions={}, localagi=None): def save_file(arg, agent_actions={}, localagi=None):
arg = json.loads(arg) arg = json.loads(arg)
filename = arg["filename"] filename = arg["filename"]
content = arg["content"] content = arg["content"]
@@ -264,7 +266,7 @@ def search_duckduckgo(a, agent_actions={}, localagi=None):
text_res="" text_res=""
for doc in list: for doc in list:
text_res+=f"""- {doc["snippet"]}. Source: {doc["title"]} - {doc["link"]}\n""" text_res+=f"""{doc["link"]}: {doc["title"]} {doc["snippet"]}\n"""
#if args.postprocess: #if args.postprocess:
# return post_process(text_res) # return post_process(text_res)
@@ -295,12 +297,12 @@ agent_actions = {
} }
}, },
}, },
"write_file": { "save_file": {
"function": write_file, "function": save_file,
"plannable": True, "plannable": True,
"description": 'The assistant replies with the action "write_file", the filename and content to save for writing a file to disk permanently. This can be used to store the result of complex actions locally.', "description": 'The assistant replies with the action "save_file", the filename and content to save for writing a file to disk permanently. This can be used to store the result of complex actions locally.',
"signature": { "signature": {
"name": "write_file", "name": "save_file",
"description": """For saving a file to disk with content.""", "description": """For saving a file to disk with content.""",
"parameters": { "parameters": {
"type": "object", "type": "object",
@@ -407,6 +409,7 @@ if __name__ == "__main__":
conversation_history=localagi.evaluate( conversation_history=localagi.evaluate(
args.prompt, args.prompt,
conversation_history, conversation_history,
critic=args.critic,
re_evaluate=args.re_evaluate, re_evaluate=args.re_evaluate,
# Enable to lower context usage but increases LLM calls # Enable to lower context usage but increases LLM calls
postprocess=args.postprocess, postprocess=args.postprocess,
@@ -424,6 +427,7 @@ if __name__ == "__main__":
conversation_history=localagi.evaluate( conversation_history=localagi.evaluate(
user_input, user_input,
conversation_history, conversation_history,
critic=args.critic,
re_evaluate=args.re_evaluate, re_evaluate=args.re_evaluate,
# Enable to lower context usage but increases LLM calls # Enable to lower context usage but increases LLM calls
postprocess=args.postprocess, postprocess=args.postprocess,

View File

@@ -140,11 +140,11 @@ class LocalAGI:
"type": "number", "type": "number",
"description": "confidence of the action" "description": "confidence of the action"
}, },
"reasoning": { "detailed_reasoning": {
"type": "string", "type": "string",
"description": "reasoning behind the intent" "description": "reasoning behind the intent"
}, },
# "observation": { # "detailed_reasoning": {
# "type": "string", # "type": "string",
# "description": "reasoning behind the intent" # "description": "reasoning behind the intent"
# }, # },
@@ -214,7 +214,7 @@ class LocalAGI:
if response_message.get("function_call"): if response_message.get("function_call"):
function_name = response.choices[0].message["function_call"].name function_name = response.choices[0].message["function_call"].name
function_parameters = response.choices[0].message["function_call"].arguments function_parameters = response.choices[0].message["function_call"].arguments
logger.debug("==> function parameters: {function_parameters}",function_parameters=function_parameters) logger.info("==> function parameters: {function_parameters}",function_parameters=function_parameters)
function_to_call = self.agent_actions[function_name]["function"] function_to_call = self.agent_actions[function_name]["function"]
function_result = function_to_call(function_parameters, agent_actions=self.agent_actions, localagi=self) function_result = function_to_call(function_parameters, agent_actions=self.agent_actions, localagi=self)
@@ -406,7 +406,7 @@ class LocalAGI:
"items": { "items": {
"type": "object", "type": "object",
"properties": { "properties": {
"reasoning": { "detailed_reasoning": {
"type": "string", "type": "string",
"description": "subtask list", "description": "subtask list",
}, },
@@ -443,7 +443,7 @@ class LocalAGI:
return res return res
return {"action": self.reply_action} return {"action": self.reply_action}
def evaluate(self,user_input, conversation_history = [],re_evaluate=False,re_evaluation_in_progress=False, postprocess=False, subtaskContext=False): def evaluate(self,user_input, conversation_history = [], critic=True, re_evaluate=False,re_evaluation_in_progress=False, postprocess=False, subtaskContext=False):
messages = [ messages = [
{ {
"role": "user", "role": "user",
@@ -486,11 +486,11 @@ class LocalAGI:
if action["action"] != self.reply_action: if action["action"] != self.reply_action:
logger.info("==> LocalAGI wants to call '{action}'", action=action["action"]) logger.info("==> LocalAGI wants to call '{action}'", action=action["action"])
#logger.info("==> Observation '{reasoning}'", reasoning=action["observation"]) #logger.info("==> Observation '{reasoning}'", reasoning=action["detailed_reasoning"])
logger.info("==> Reasoning '{reasoning}'", reasoning=action["reasoning"]) logger.info("==> Reasoning '{reasoning}'", reasoning=action["detailed_reasoning"])
# Force executing a plan instead # Force executing a plan instead
reasoning = action["reasoning"] reasoning = action["detailed_reasoning"]
if action["action"] == self.reply_action: if action["action"] == self.reply_action:
logger.info("==> LocalAGI wants to create a plan that involves more actions ") logger.info("==> LocalAGI wants to create a plan that involves more actions ")
@@ -500,7 +500,29 @@ class LocalAGI:
if self.processed_messages > 0: if self.processed_messages > 0:
function_completion_message += self.process_history(conversation_history)+"\n" function_completion_message += self.process_history(conversation_history)+"\n"
function_completion_message += "Request: "+user_input+"\nReasoning: "+reasoning function_completion_message += "Request: "+user_input+"\nReasoning: "+reasoning
responses, function_results = self.process_functions(function_completion_message, action=action["action"]) responses, function_results = self.process_functions(function_completion_message, action=action["action"])
# Critic re-evaluates the action
# if critic:
# critic = self.analyze(responses[1:-1], suffix=f"Analyze if the function that was picked is correct and satisfies the user request from the context above. Suggest a different action if necessary. If the function picked was correct, write the picked function.\n")
# logger.info("==> Critic action: {critic}", critic=critic)
# previous_action = action["action"]
# try:
# action = self.needs_to_do_action(critic,agent_actions=picker_actions)
# if action["action"] != previous_action:
# logger.info("==> Critic decided to change action to: {action}", action=action["action"])
# responses, function_results = self.process_functions(function_completion_message, action=action["action"])
# except Exception as e:
# logger.error("==> error: ")
# logger.error(e)
# action = {"action": self.reply_action}
# Critic re-evaluates the plan
if critic and isinstance(function_results, dict) and function_results.get("subtasks") and len(function_results["subtasks"]) > 0:
critic = self.analyze(responses[1:], prefix="", suffix=f"Analyze if the plan is correct and satisfies the user request from the context above. Suggest a revised plan if necessary.\n")
logger.info("==> Critic plan: {critic}", critic=critic)
responses, function_results = self.process_functions(function_completion_message+"\n"+critic, action=action["action"])
# if there are no subtasks, we can just reply, # if there are no subtasks, we can just reply,
# otherwise we execute the subtasks # otherwise we execute the subtasks
# First we check if it's an object # First we check if it's an object
@@ -508,18 +530,18 @@ class LocalAGI:
# cycle subtasks and execute functions # cycle subtasks and execute functions
subtask_result="" subtask_result=""
for subtask in function_results["subtasks"]: for subtask in function_results["subtasks"]:
#ctr="Context: "+user_input+"\nThought: "+action["reasoning"]+ "\nRequest: "+subtask["reasoning"] cr="Request: "+user_input+"\nReasoning: "+action["detailed_reasoning"]+ "\n"
#cr="Request: "+user_input+"\n" #cr="Request: "+user_input+"\n"
cr="" #cr=""
if subtask_result != "" and subtaskContext: if subtask_result != "" and subtaskContext:
# Include cumulative results of previous subtasks # Include cumulative results of previous subtasks
# TODO: this grows context, maybe we should use a different approach or summarize # TODO: this grows context, maybe we should use a different approach or summarize
##if postprocess: ##if postprocess:
## cr+= "Subtask results: "+post_process(subtask_result)+"\n" ## cr+= "Subtask results: "+post_process(subtask_result)+"\n"
##else: ##else:
cr+="\n"+subtask_result+"\n" cr+="\nAdditional context: ```\n"+subtask_result+"\n```\n"
subtask_reasoning = subtask["reasoning"] subtask_reasoning = subtask["detailed_reasoning"]
cr+="Reasoning: "+action["reasoning"]+ "\n" #cr+="Reasoning: "+action["detailed_reasoning"]+ "\n"
cr+="\nFunction to call:" +subtask["function"]+"\n" cr+="\nFunction to call:" +subtask["function"]+"\n"
logger.info("==> subtask '{subtask}' ({reasoning})", subtask=subtask["function"], reasoning=subtask_reasoning) logger.info("==> subtask '{subtask}' ({reasoning})", subtask=subtask["function"], reasoning=subtask_reasoning)
if postprocess: if postprocess:
@@ -558,7 +580,9 @@ class LocalAGI:
#responses = converse(responses) #responses = converse(responses)
# TODO: this needs to be optimized # TODO: this needs to be optimized
responses = self.analyze(responses[1:], suffix=f"Return an appropriate answer given the context above\n") responses = self.analyze(responses[1:],
prefix="",
suffix=f"Return an appropriate answer given the context above\n")
# add responses to conversation history by extending the list # add responses to conversation history by extending the list
conversation_history.append( conversation_history.append(