wip python repl - minor notes
main.py | 35 additions
@@ -346,6 +346,8 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_acti
             cr+= "Request: "+subtask["reasoning"]
             subtask_response, function_results = process_functions(cr, subtask["function"],agent_actions=agent_actions)
             subtask_result+=process_history(subtask_response)
+            if postprocess:
+                subtask_result=post_process(subtask_result)
             responses.extend(subtask_response)
         if re_evaluate:
             ## Better output or this infinite loops..
@@ -363,6 +365,7 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_acti
         conversation_history.extend(responses)
         return conversation_history
 
+    # TODO: this needs to be optimized
     responses.append(
         {
             "role": "system",
@@ -396,6 +399,7 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_acti
         logger.info("==> μAGI will reply to the user")
         return conversation_history
 
+    # TODO: this needs to be optimized
     # get the response from the model
     response = openai.ChatCompletion.create(
         model=LLM_MODEL,
@@ -456,6 +460,14 @@ def search(query, agent_actions={}):
         text_res+="- "+doc.page_content+"\n"
     return text_res
 
+# Python REPL
+def python_repl(args, agent_actions={}):
+    args = json.loads(args)
+    try:
+        return eval(args["code"])
+    except Exception as e:
+        return str(e)
+
 def calculate_plan(user_input, agent_actions={}):
     res = json.loads(user_input)
     logger.info("--> Calculating plan: {description}", description=res["description"])
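
Note: eval() only evaluates single expressions, so statements (imports, assignments, multi-line snippets) raise a SyntaxError that the except branch returns as a string. A rough usage sketch of the new helper, assuming the model delivers its function-call arguments as a JSON-encoded string:

    python_repl('{"code": "2 ** 10"}')    # -> 1024
    python_repl('{"code": "import os"}')  # -> "invalid syntax ..." (eval() rejects statements)
    python_repl('{"code": "1 / 0"}')      # -> "division by zero"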
@@ -660,6 +672,29 @@ agent_actions = {
             }
         },
     },
+    "python": {
+        "function": python_repl,
+        "plannable": True,
+        "description": 'The assistant replies with the action "python" to execute Python code.',
+        "signature": {
+            "name": "python",
+            "description": """Search in memory""",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "code": {
+                        "type": "string",
+                        "description": "reasoning behind the intent"
+                    },
+                    "reasoning": {
+                        "type": "string",
+                        "description": "reasoning behind the intent"
+                    },
+                },
+                "required": ["code"]
+            }
+        },
+    },
     "search_memory": {
         "function": search,
         "plannable": True,
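
Note: the signature for the new "python" action still carries copy-pasted placeholders from search_memory: its inner description reads "Search in memory" and the code parameter is described as "reasoning behind the intent". A hedged sketch of how just those two fields could read after cleanup (rest of the entry unchanged):

    "description": """Execute Python code""",
    ...
    "code": {
        "type": "string",
        "description": "the Python code to execute"
    },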