updates
main.py (251 changed lines)
@@ -19,14 +19,28 @@ from chromadb.config import Settings
 import json
 import os
+
+# Parse arguments such as system prompt and batch mode
+import argparse
+parser = argparse.ArgumentParser(description='microAGI')
+parser.add_argument('--system-prompt', dest='system_prompt', action='store',
+                    help='System prompt to use')
+parser.add_argument('--batch-mode', dest='batch_mode', action='store_true', default=False,
+                    help='Batch mode')
+# skip avatar creation
+parser.add_argument('--skip-avatar', dest='skip_avatar', action='store_true', default=False,
+                    help='Skip avatar creation')
+args = parser.parse_args()


 FUNCTIONS_MODEL = os.environ.get("FUNCTIONS_MODEL", "functions")
 LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 VOICE_MODEL= os.environ.get("TTS_MODEL","en-us-kathleen-low.onnx")
 DEFAULT_SD_MODEL = os.environ.get("DEFAULT_SD_MODEL", "stablediffusion")
 DEFAULT_SD_PROMPT = os.environ.get("DEFAULT_SD_PROMPT", "floating hair, portrait, ((loli)), ((one girl)), cute face, hidden hands, asymmetrical bangs, beautiful detailed eyes, eye shadow, hair ornament, ribbons, bowties, buttons, pleated skirt, (((masterpiece))), ((best quality)), colorful|((part of the head)), ((((mutated hands and fingers)))), deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, poorly drawn hands, missing limb, blurry, floating limbs, disconnected limbs, malformed hands, blur, out of focus, long neck, long body, Octane renderer, lowres, bad anatomy, bad hands, text")
+PERSISTENT_DIR = os.environ.get("PERSISTENT_DIR", "/data")

 REPLY_ACTION = "reply"

+PLAN_ACTION = "plan"
 #embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
 embeddings = LocalAIEmbeddings(model="all-MiniLM-L6-v2")
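For context, a quick sketch of what the new argument surface looks like when exercised directly (same parser as above, fed an explicit argv for illustration):

    import argparse

    parser = argparse.ArgumentParser(description='microAGI')
    parser.add_argument('--system-prompt', dest='system_prompt', action='store',
                        help='System prompt to use')
    parser.add_argument('--batch-mode', dest='batch_mode', action='store_true', default=False,
                        help='Batch mode')
    parser.add_argument('--skip-avatar', dest='skip_avatar', action='store_true', default=False,
                        help='Skip avatar creation')

    args = parser.parse_args(['--batch-mode'])
    print(args.system_prompt, args.batch_mode, args.skip_avatar)  # None True False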
@@ -65,9 +79,9 @@ def tts(input_text, model=VOICE_MODEL):
     if response.status_code == 200:
         with open(output_file_path, 'wb') as f:
             f.write(response.content)
-        print('Audio file saved successfully:', output_file_path)
+        logger.info('Audio file saved successfully:', output_file_path)
     else:
-        print('Request failed with status code', response.status_code)
+        logger.info('Request failed with status code', response.status_code)

     # Use aplay to play the audio
     os.system('aplay ' + output_file_path)
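A note on the swap above: assuming `logger` is a loguru logger (the brace-style calls later in this diff suggest it is), extra positional arguments are treated as `str.format()` parameters rather than concatenated the way `print` concatenates them, so the path and status code would be silently dropped from the rendered message. A sketch of the calls loguru expects:

    logger.info('Audio file saved successfully: {}', output_file_path)
    logger.info('Request failed with status code {}', response.status_code)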
@@ -84,9 +98,9 @@ def needs_to_do_action(user_input,agent_actions={}):
     messages = [
         {"role": "user",
          "content": f"""Transcript of AI assistant responding to user requests. Replies with the action to perform, including reasoning, and the confidence interval from 0 to 100.
-{descriptions}
-
-Request: {user_input}
+{descriptions}"""},
+        {"role": "user",
+         "content": f"""{user_input}
 Function call: """
         }
     ]
@@ -119,10 +133,10 @@ Function call: """
         #model="gpt-3.5-turbo",
         model=FUNCTIONS_MODEL,
         messages=messages,
+        request_timeout=1200,
         functions=functions,
         max_tokens=200,
         stop=None,
-        temperature=0.5,
+        temperature=0.1,
         #function_call="auto"
         function_call={"name": "intent"},
     )
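In the OpenAI function-calling API used here, passing function_call={"name": "intent"} forces the model to call that specific function instead of letting it choose ("auto"). A minimal self-contained sketch, with a hypothetical schema standing in for the one built from agent_actions:

    import openai

    # Hypothetical schema; the real one is derived from agent_actions signatures:
    functions = [{
        "name": "intent",
        "description": "Pick the next action to perform",
        "parameters": {
            "type": "object",
            "properties": {
                "action": {"type": "string"},
                "reasoning": {"type": "string"},
                "confidence": {"type": "integer"},
            },
            "required": ["action"],
        },
    }]
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Remind me to buy milk\nFunction call: "}],
        functions=functions,
        function_call={"name": "intent"},  # force a call to "intent"
    )
    print(response.choices[0].message["function_call"].arguments)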
@@ -132,8 +146,8 @@ Function call: """
         function_parameters = response.choices[0].message["function_call"].arguments
         # read the json from the string
         res = json.loads(function_parameters)
-        print(">>> function name: "+function_name)
-        print(">>> function parameters: "+function_parameters)
+        logger.info(">>> function name: "+function_name)
+        logger.info(">>> function parameters: "+function_parameters)
         return res
     return {"action": REPLY_ACTION}

@@ -146,10 +160,10 @@ def process_functions(user_input, action="", agent_actions={}):
     messages = [
         # {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user",
-         "content": f"""Transcript of AI assistant responding to user requests.
-{descriptions}
-
-Request: {user_input}
+         "content": f"""Transcript of AI assistant responding to user requests. Replies with the action to perform, including reasoning, and the confidence interval from 0 to 100.
+{descriptions}"""},
+        {"role": "user",
+         "content": f"""{user_input}
 Function call: """
         }
     ]
@@ -160,21 +174,14 @@ Function call: """
     if response_message.get("function_call"):
         function_name = response.choices[0].message["function_call"].name
         function_parameters = response.choices[0].message["function_call"].arguments
-        print("==> function name: ")
-        print(function_name)
-        print("==> function parameters: ")
-        print(function_parameters)
+        logger.info("==> function name: ")
+        logger.info(function_name)
+        logger.info("==> function parameters: ")
+        logger.info(function_parameters)
         function_to_call = agent_actions[function_name]["function"]
         function_result = function_to_call(function_parameters, agent_actions=agent_actions)
-        print("==> function result: ")
-        print(function_result)
-        messages = [
-            # {"role": "system", "content": "You are a helpful assistant."},
-            {
-                "role": "user",
-                "content": user_input,
-            }
-        ]
+        logger.info("==> function result: ")
+        logger.info(function_result)
         messages.append(
             {
                 "role": "assistant",
@@ -186,7 +193,7 @@ Function call: """
             {
                 "role": "function",
                 "name": function_name,
-                "content": f'{{"result": "{str(function_result)}"}}'
+                "content": str(function_result)
             }
         )
         return messages, function_result
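The change above drops the hand-built JSON wrapper and passes the raw result string back, which matches the OpenAI chat format for function results. A sketch of the message that ends up appended (values illustrative):

    # Illustrative shape of the appended function-result message:
    function_message = {
        "role": "function",            # role reserved for function results
        "name": "recall",              # whichever function was just called
        "content": "Memories found in the database:\n- example memory",
    }
    messages.append(function_message)  # `messages` is the running list built above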
@@ -195,8 +202,8 @@ def function_completion(messages, action="", agent_actions={}):
     function_call = "auto"
     if action != "":
         function_call={"name": action}
-    print("==> function_call: ")
-    print(function_call)
+    logger.info("==> function_call: ")
+    logger.info(function_call)

     # get the functions from the signatures of the agent actions, if exists
     functions = []
@@ -208,7 +215,7 @@ def function_completion(messages, action="", agent_actions={}):
         model=FUNCTIONS_MODEL,
         messages=messages,
         functions=functions,
         max_tokens=200,
+        request_timeout=1200,
         stop=None,
         temperature=0.1,
         function_call=function_call
@@ -221,55 +228,92 @@ def process_history(conversation_history):
     messages = ""
     for message in conversation_history:
-        # if there is content append it
-        if message.get("content"):
-            messages+=message["content"]+"\n"
-        if message.get("function_call"):
+        if message.get("content") and message["role"] == "function":
+            messages+="Function result: " + message["content"]+"\n"
+        elif message.get("function_call"):
             # encode message["function_call"] to json and append it
             fcall = json.dumps(message["function_call"])
-            messages+=fcall+"\n"
+            messages+="Assistant calls function: " +fcall+"\n"
+        elif message.get("content") and message["role"] == "user":
+            messages+="User message: "+message["content"]+"\n"
+        elif message.get("content") and message["role"] == "assistant":
+            messages+="Assistant message: "+message["content"]+"\n"
     return messages
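To make the new formatting concrete, here is an illustrative run of the rewritten process_history (message shapes assumed from the code above):

    history = [
        {"role": "user", "content": "What do you remember about me?"},
        {"role": "assistant", "function_call": {"name": "recall", "arguments": "{}"}},
        {"role": "function", "content": "Memories found in the database:\n- likes short answers"},
    ]
    print(process_history(history))
    # User message: What do you remember about me?
    # Assistant calls function: {"name": "recall", "arguments": "{}"}
    # Function result: Memories found in the database:
    # - likes short answers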


-def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_actions={}):
+def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_actions={},re_evaluation_in_progress=False):

     messages = [
         {
             "role": "user",
             "content": user_input,
         }
     ]

     conversation_history.extend(messages)

+    # pulling the old history makes the context grow exponentially
+    # and most importantly it repeats the first message with the commands again and again.
+    # it needs a bit of cleanup: process the messages and piggyback more LocalAI functions templates
+    # old_history = process_history(conversation_history)
+    # action_picker_message = "Conversation history:\n"+old_history
+    # action_picker_message += "\n"
+    action_picker_message = "Request: "+user_input
+
+    if re_evaluation_in_progress:
+        action_picker_message+="\nRe-evaluate whether another action is needed or we have completed the user request."
+        action_picker_message+="\nReasoning: If no action is needed, I will use "+REPLY_ACTION+" to reply to the user."

     try:
-        action = needs_to_do_action(user_input,agent_actions=agent_actions)
+        action = needs_to_do_action(action_picker_message,agent_actions=agent_actions)
     except Exception as e:
-        print("==> error: ")
-        print(e)
+        logger.error("==> error: ")
+        logger.error(e)
         action = {"action": REPLY_ACTION}

     if action["action"] != REPLY_ACTION:
-        print("==> miniAGI wants to do an action: ")
-        print(action["action"])
-        print("==> Reasoning: ")
-        print(action["reasoning"])
-        if action["action"] == "generate_plan":
-            print("==> It's a plan <==: ")
+        logger.info("==> microAGI wants to call '{action}'", action=action["action"])
+        logger.info("==> Reasoning '{reasoning}'", reasoning=action["reasoning"])
+        if action["action"] == PLAN_ACTION:
+            logger.info("==> It's a plan <==: ")

-        responses, function_results = process_functions(user_input+"\nReasoning: "+action["reasoning"], action=action["action"], agent_actions=agent_actions)
+        #function_completion_message = "Conversation history:\n"+old_history+"\n"+
+        function_completion_message = "Request: "+user_input+"\nReasoning: "+action["reasoning"]
+        responses, function_results = process_functions(function_completion_message, action=action["action"], agent_actions=agent_actions)
         # if there are no subtasks, we can just reply,
         # otherwise we execute the subtasks
         # First we check if it's an object
         if isinstance(function_results, dict) and function_results.get("subtasks") and len(function_results["subtasks"]) > 0:
             # cycle subtasks and execute functions
             for subtask in function_results["subtasks"]:
-                print("==> subtask: ")
-                print(subtask)
-                subtask_response, function_results = process_functions(subtask["reasoning"], subtask["function"],agent_actions=agent_actions)
+                logger.info("==> subtask: ")
+                logger.info(subtask)
+                #ctr="Context: "+user_input+"\nThought: "+action["reasoning"]+ "\nRequest: "+subtask["reasoning"]
+                cr="Context: "+user_input+"\nRequest: "+subtask["reasoning"]
+                subtask_response, function_results = process_functions(cr, subtask["function"],agent_actions=agent_actions)
                 responses.extend(subtask_response)
         if re_evaluate:
-            all = process_history(responses)
-            print("==> all: ")
-            print(all)
-            ## Better output or this infinite loops..
-            print("-> Re-evaluate if another action is needed")
-            responses = evaluate(user_input+process_history(responses), responses, re_evaluate,agent_actions=agent_actions)
+            logger.info("-> Re-evaluate if another action is needed")
+            responses = evaluate(user_input+"\n Conversation history: \n"+process_history(responses[1:]), responses, re_evaluate,agent_actions=agent_actions,re_evaluation_in_progress=True)
+
+        if re_evaluation_in_progress:
+            conversation_history.extend(responses)
+            return conversation_history

         responses.append(
             {
                 "role": "system",
                 "content": "Return an appropriate answer to the user given the context above."
             }
         )

         response = openai.ChatCompletion.create(
             model=LLM_MODEL,
             messages=responses,
             max_tokens=200,
             stop=None,
-            temperature=0.5,
+            request_timeout=1200,
+            temperature=0.1,
         )
         responses.append(
             {
@@ -279,28 +323,29 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_actions={}):
         )
         # add responses to conversation history by extending the list
         conversation_history.extend(responses)
-        # print the latest response from the conversation history
-        print(conversation_history[-1]["content"])
+        # logger.info the latest response from the conversation history
+        logger.info(conversation_history[-1]["content"])
         tts(conversation_history[-1]["content"])
     else:
-        print("==> no action needed")
-        # construct the message and add it to the conversation history
-        message = {"role": "user", "content": user_input}
-        conversation_history.append(message)
-        #conversation_history.append({ "role": "assistant", "content": "No action needed from my side."})
+        logger.info("==> no action needed")
+
+        if re_evaluation_in_progress:
+            logger.info("==> microAGI has completed the user request")
+            logger.info("==> microAGI will reply to the user")
+            return conversation_history
+
         # get the response from the model
         response = openai.ChatCompletion.create(
             model=LLM_MODEL,
             messages=conversation_history,
             max_tokens=200,
             stop=None,
-            temperature=0.5,
+            temperature=0.1,
+            request_timeout=1200,
         )
         # add the response to the conversation history by extending the list
         conversation_history.append({ "role": "assistant", "content": response.choices[0].message["content"]})
-        # print the latest response from the conversation history
-        print(conversation_history[-1]["content"])
+        # logger.info the latest response from the conversation history
+        logger.info(conversation_history[-1]["content"])
         tts(conversation_history[-1]["content"])
     return conversation_history

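The re_evaluation_in_progress flag is what keeps the recursive re-evaluation from looping: a nested evaluate() call now returns early, either right after extending the history or as soon as no further action is needed, instead of generating a reply and recursing again. A condensed, illustrative sketch of the guard (not the real function):

    def evaluate_sketch(user_input, responses, re_evaluate, re_evaluation_in_progress=False):
        needs_action = not re_evaluation_in_progress  # pretend: first pass acts, second does not
        if needs_action:
            if re_evaluate:
                # the nested call always carries the guard flag
                responses = evaluate_sketch(user_input, responses, re_evaluate,
                                            re_evaluation_in_progress=True)
            if re_evaluation_in_progress:
                return responses  # early exit: nested passes never generate a reply
            # ...the real function generates the final reply here...
        return responses

    print(evaluate_sketch("hi", [], re_evaluate=True))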
@@ -309,19 +354,20 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_actions={}):
 ### Agent capabilities

 def save(memory, agent_actions={}):
-    print(">>> saving to memories: ")
-    print(memory)
-    chroma_client.add_texts([memory],[{"id": str(uuid.uuid4())}])
+    q = json.loads(memory)
+    logger.info(">>> saving to memories: ")
+    logger.info(q["thought"])
+    chroma_client.add_texts([q["thought"]],[{"id": str(uuid.uuid4())}])
     chroma_client.persist()
     return f"The object was saved permanently to memory."

 def search(query, agent_actions={}):
-    res = chroma_client.similarity_search(query)
-    print(">>> query: ")
-    print(query)
-    print(">>> retrieved memories: ")
-    print(res)
-    return res
+    q = json.loads(query)
+    docs = chroma_client.similarity_search(q["reasoning"])
+    text_res="Memories found in the database:\n"
+    for doc in docs:
+        text_res+="- "+doc.page_content+"\n"
+    return text_res

 def calculate_plan(user_input, agent_actions={}):
     res = json.loads(user_input)
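Both capabilities now parse the model's raw function-call arguments themselves: save() expects a JSON object with a "thought" field and search() one with a "reasoning" field, matching the updated signatures further down. Illustrative calls:

    save('{"thought": "the user prefers short answers"}')
    result = search('{"reasoning": "what does the user prefer?"}')
    # -> "Memories found in the database:\n- the user prefers short answers\n"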
@@ -329,7 +375,7 @@ def calculate_plan(user_input, agent_actions={}):
     messages = [
         {"role": "user",
          "content": f"""Transcript of AI assistant responding to user requests.
-Replies with a plan to achieve the user's goal with a list of subtasks with logical steps.
+Replies with a plan to achieve the user's goal with a list of subtasks with logical steps. The reasoning includes a self-contained, detailed instruction to fulfill the task.

 Request: {res["description"]}
 Function call: """
@@ -375,9 +421,8 @@ Function call: """
         model=FUNCTIONS_MODEL,
         messages=messages,
         functions=functions,
         max_tokens=200,
         stop=None,
-        temperature=0.5,
+        temperature=0.1,
         #function_call="auto"
         function_call={"name": "plan"},
     )
@@ -391,11 +436,17 @@ Function call: """
         return res
     return {"action": REPLY_ACTION}


 # write file to disk with content
 def write_file(arg, agent_actions={}):
     arg = json.loads(arg)
     filename = arg["filename"]
     content = arg["content"]
+    # create persistent dir if it does not exist
+    if not os.path.exists(PERSISTENT_DIR):
+        os.makedirs(PERSISTENT_DIR)
+    # write the file in the directory specified
+    filename = os.path.join(PERSISTENT_DIR, filename)
     with open(filename, 'w') as f:
         f.write(content)
     return f"File {filename} saved successfully."
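write_file also takes its arguments as a JSON string and now roots relative paths under PERSISTENT_DIR ("/data" by default). One caveat: os.path.join discards the prefix when the supplied filename is absolute, so an absolute path still escapes the persistent directory:

    write_file('{"filename": "notes.txt", "content": "hello"}')  # -> /data/notes.txt

    import os
    os.path.join("/data", "/tmp/escape.txt")  # -> '/tmp/escape.txt', prefix dropped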
@@ -478,7 +529,7 @@ agent_actions = {
     "write_file": {
         "function": write_file,
         "plannable": True,
-        "description": 'For writing a file to disk with content, the assistant replies with the action "write_file" and the filename and content to save.',
+        "description": 'The assistant replies with the action "write_file", the filename and content to save for writing a file to disk permanently. This can be used to store the result of complex actions locally.',
         "signature": {
             "name": "write_file",
             "description": """For saving a file to disk with content.""",
@@ -497,12 +548,12 @@ agent_actions = {
             }
         },
     },
-    "save_memory": {
+    "remember": {
         "function": save,
         "plannable": True,
-        "description": 'For saving a memory, the assistant replies with the action "save_memory" and the string to save.',
+        "description": 'The assistant replies with the action "remember" and the string to save in order to remember something or save information that it thinks is relevant permanently.',
         "signature": {
-            "name": "save_memory",
+            "name": "remember",
             "description": """Save or store informations into memory.""",
             "parameters": {
                 "type": "object",
@@ -516,35 +567,31 @@ agent_actions = {
             }
         },
     },
-    "search_memory": {
+    "recall": {
         "function": search,
         "plannable": True,
-        "description": 'For searching a memory, the assistant replies with the action "search_memory" and the query to search to find information stored previously.',
+        "description": 'The assistant replies with the action "recall" for searching between its memories with a query term.',
         "signature": {
-            "name": "search_memory",
+            "name": "recall",
             "description": """Search in memory""",
             "parameters": {
                 "type": "object",
                 "properties": {
-                    "query": {
-                        "type": "string",
-                        "description": "The query to be used to search informations"
-                    },
                     "reasoning": {
                         "type": "string",
                         "description": "reasoning behind the intent"
                     },
                 },
-                "required": ["query"]
+                "required": ["reasoning"]
             }
         },
     },
-    "generate_plan": {
+    PLAN_ACTION: {
         "function": calculate_plan,
         "plannable": False,
-        "description": 'For generating a plan for complex tasks, the assistant replies with the action "generate_plan" and a detailed list of all the subtasks needed to execute the user goal using the available actions.',
+        "description": 'The assistant, for solving complex tasks that involve more than one action or planning actions in sequence, replies with the action "'+PLAN_ACTION+'" and a detailed list of all the subtasks.',
         "signature": {
-            "name": "generate_plan",
+            "name": PLAN_ACTION,
             "description": """Plan complex tasks.""",
             "parameters": {
                 "type": "object",
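The renamed plan action feeds the subtask loop in evaluate(), which expects the forced "plan" call to return an object with a subtasks list, each entry naming a plannable action and the reasoning to pass to it. An illustrative result (hypothetical values):

    function_results = {
        "subtasks": [
            {"function": "recall", "reasoning": "look up earlier notes about the topic"},
            {"function": "write_file", "reasoning": "save the summary to summary.txt"},
        ]
    }
    for subtask in function_results["subtasks"]:
        print(subtask["function"], "<-", subtask["reasoning"])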
@@ -567,12 +614,24 @@ agent_actions = {

 conversation_history = []

-logger.info("Welcome to miniAGI")
+# Set a system prompt if SYSTEM_PROMPT is set
+if os.environ.get("SYSTEM_PROMPT"):
+    conversation_history.append({
+        "role": "system",
+        "content": os.environ.get("SYSTEM_PROMPT")
+    })
+
+logger.info("Welcome to microAGI")
 logger.info("Creating avatar, please wait...")

 display_avatar()

+logger.info("Welcome to microAGI")
+logger.info("microAGI has the following actions available at its disposal:")
+for action in agent_actions:
+    logger.info("{action} - {description}", action=action, description=agent_actions[action]["description"])

 # TODO: process functions also considering the conversation history? conversation history + input
 while True:
     user_input = input("> ")
-    conversation_history=evaluate(user_input, conversation_history, re_evaluate=False, agent_actions=agent_actions)
+    conversation_history=evaluate(user_input, conversation_history, re_evaluate=True, agent_actions=agent_actions)
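As far as this diff shows, the new CLI flags are parsed but not yet consulted: the system prompt is seeded from the SYSTEM_PROMPT environment variable rather than --system-prompt, and display_avatar() runs regardless of --skip-avatar. A hypothetical invocation, for illustration only:

    import os, subprocess

    # Hypothetical: run main.py with a system prompt and the new flags set.
    subprocess.run(
        ["python", "main.py", "--skip-avatar", "--batch-mode"],
        env={**os.environ, "SYSTEM_PROMPT": "You are a terse assistant."},
    )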