cumulative subtask context
.env

```
@@ -1,12 +1,15 @@
DEBUG=true
MODELS_PATH=/models
GALLERIES=[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}, {"url": "github:go-skynet/model-gallery/huggingface.yaml","name":"huggingface"}]

# 30b setup
PRELOAD_MODELS=[{"id":"huggingface@thebloke/gplatty-30b-ggml/gplatty-30b.ggmlv3.q2_k.bin","name":"gpt-4","overrides":{"context_size":4096,"mmap":true,"f16":true,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95}}},{"id":"model-gallery@stablediffusion"},{"id":"model-gallery@voice-en-us-kathleen-low"},{"url":"github:go-skynet/model-gallery/base.yaml","name":"all-MiniLM-L6-v2","overrides":{"embeddings":true,"backend":"huggingface-embeddings","parameters":{"model":"all-MiniLM-L6-v2"}}},{"id":"huggingface@thebloke/gplatty-30b-ggml/gplatty-30b.ggmlv3.q2_k.bin","name":"functions","overrides":{"context_size":4096,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"template":{"chat":"","completion":""},"roles":{"assistant":"ASSISTANT:","system":"SYSTEM:","assistant_function_call":"FUNCTION_CALL:","function":"FUNCTION CALL RESULT:"},"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95},"function":{"disable_no_action":true},"mmap":true,"f16":true}}]
# 13b setup
# PRELOAD_MODELS=[{"id":"huggingface@thebloke/wizardlm-13b-v1.0-uncensored-ggml/wizardlm-13b-v1.0-uncensored.ggmlv3.q4_k_m.bin","name":"gpt-4","overrides":{"context_size":2048,"mmap":true,"f16":true,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95}}},{"id":"model-gallery@stablediffusion"},{"id":"model-gallery@voice-en-us-kathleen-low"},{"url":"github:go-skynet/model-gallery/base.yaml","name":"all-MiniLM-L6-v2","overrides":{"embeddings":true,"backend":"huggingface-embeddings","parameters":{"model":"all-MiniLM-L6-v2"}}},{"id":"huggingface@thebloke/wizardlm-13b-v1.0-uncensored-ggml/wizardlm-13b-v1.0-uncensored.ggmlv3.q4_0.bin","name":"functions","overrides":{"context_size":2048,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"template":{"chat":"","completion":""},"roles":{"assistant":"ASSISTANT:","system":"SYSTEM:","assistant_function_call":"FUNCTION_CALL:","function":"FUNCTION CALL RESULT:"},"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95},"function":{"disable_no_action":true},"mmap":true,"f16":true}}]

#PRELOAD_MODELS_CONFIG=/config/preload-models.yaml
# 30b superhot setup
PRELOAD_MODELS=[{"id":"huggingface@thebloke/gplatty-30b-superhot-8k-ggml/gplatty-30b-superhot-8k.ggmlv3.q2_k.bin","name":"gpt-4","overrides":{"context_size":8192,"mmap":true,"f16":true,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95,"rope_freq_scale":0.25}}},{"id":"model-gallery@stablediffusion"},{"id":"model-gallery@voice-en-us-kathleen-low"},{"url":"github:go-skynet/model-gallery/base.yaml","name":"all-MiniLM-L6-v2","overrides":{"embeddings":true,"backend":"huggingface-embeddings","parameters":{"model":"all-MiniLM-L6-v2"}}},{"id":"huggingface@thebloke/gplatty-30b-superhot-8k-ggml/gplatty-30b-superhot-8k.ggmlv3.q2_k.bin","name":"functions","overrides":{"context_size":8192,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"template":{"chat":"","completion":""},"roles":{"assistant":"ASSISTANT:","system":"SYSTEM:","assistant_function_call":"FUNCTION_CALL:","function":"FUNCTION CALL RESULT:"},"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95,"rope_freq_scale":0.25},"function":{"disable_no_action":true},"mmap":true,"f16":true}}]
#PRELOAD_MODELS=[{"id":"huggingface@thebloke/gplatty-30b-superhot-8k-ggml/gplatty-30b-superhot-8k.ggmlv3.q2_k.bin","name":"gpt-4","overrides":{"context_size":8192,"mmap":true,"f16":true,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95,"rope_freq_scale":0.25}}},{"id":"model-gallery@stablediffusion"},{"id":"model-gallery@voice-en-us-kathleen-low"},{"url":"github:go-skynet/model-gallery/base.yaml","name":"all-MiniLM-L6-v2","overrides":{"embeddings":true,"backend":"huggingface-embeddings","parameters":{"model":"all-MiniLM-L6-v2"}}},{"id":"huggingface@thebloke/gplatty-30b-superhot-8k-ggml/gplatty-30b-superhot-8k.ggmlv3.q2_k.bin","name":"functions","overrides":{"context_size":8192,"mirostat":2,"mirostat_tau":5,"mirostat_eta":0.1,"template":{"chat":"","completion":""},"roles":{"assistant":"ASSISTANT:","system":"SYSTEM:","assistant_function_call":"FUNCTION_CALL:","function":"FUNCTION CALL RESULT:"},"parameters":{"temperature":0.1,"top_k":40,"top_p":0.95,"rope_freq_scale":0.25},"function":{"disable_no_action":true},"mmap":true,"f16":true}}]
OPENAI_API_KEY=sk---
OPENAI_API_BASE=http://api:8080
IMAGE_PATH=/tmp
```
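Each PRELOAD_MODELS entry is a JSON object naming either a gallery id or a raw config URL, plus per-model overrides (context size, mirostat sampling, prompt template, roles). The superhot variant pairs context_size 8192 with rope_freq_scale 0.25, which matches the 4x RoPE scaling those 8k models were trained with. Below is a minimal sketch of how a value of this shape can be parsed and inspected, assuming only that the variable holds valid JSON; the `summarize_preload` helper is hypothetical and not part of microAGI or LocalAI:

```python
# Hypothetical helper: parse a PRELOAD_MODELS-style JSON array from the
# environment and print one line per model entry.
import json
import os

def summarize_preload(raw: str) -> list[str]:
    """Return one human-readable line per preload entry."""
    entries = json.loads(raw)
    lines = []
    for entry in entries:
        # Entries reference either a gallery id or a raw config URL.
        source = entry.get("id") or entry.get("url", "<unknown>")
        name = entry.get("name", "<gallery default>")
        overrides = entry.get("overrides", {})
        lines.append(f"{name}: {source} (overrides: {sorted(overrides)})")
    return lines

if __name__ == "__main__":
    for line in summarize_preload(os.environ.get("PRELOAD_MODELS", "[]")):
        print(line)
```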
README.md
```diff
@@ -1,10 +1,15 @@
-# microAGI
+<h1 align="center">
+<br>
+<img height="300" src="https://github.com/mudler/microAGI/assets/2420543/7717fafb-de72-4a2d-a47a-229fc64b5716"> <br>
+μAGI (microAGI)
+<br>
+</h1>
 
 From the [LocalAI](https://localai.io) author, microAGI. 100% Local AI assistant.
 
 Note: this is a fun project, not a serious one. Be warned!
 
-## What is microAGI?
+## What is μAGI?
 
 It is a dead simple experiment to show how to tie the various LocalAI functionalities to create a virtual assistant that can do tasks. It is simple on purpose, trying to be minimalistic and easy to understand and customize.
```
````diff
@@ -57,5 +62,5 @@ Under the hood LocalAI converts functions to llama.cpp BNF grammars. While OpenA
 Run docker-compose with main.py checked-out:
 
 ```bash
-docker-compose run -v main.py:/app/main.py -i --rm microAGI
+docker-compose run -v main.py:/app/main.py -i --rm microagi
 ```
````
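The hunk context above notes that LocalAI converts function definitions into llama.cpp BNF grammars server-side. Combined with OPENAI_API_BASE=http://api:8080 from the .env, that means the "functions" model defined in PRELOAD_MODELS can be exercised with the stock openai Python package. A hedged sketch, assuming the legacy 0.x openai client that was current when this commit landed; the get_weather schema is a made-up example, not part of this repo:

```python
# Sketch: call the "functions" model through LocalAI's OpenAI-compatible
# endpoint. Assumes the legacy 0.x `openai` client; get_weather is a
# made-up example function.
import openai

openai.api_key = "sk---"            # LocalAI does not validate the key
openai.api_base = "http://api:8080"  # OPENAI_API_BASE from .env

response = openai.ChatCompletion.create(
    model="functions",  # name assigned in the PRELOAD_MODELS overrides
    messages=[{"role": "user", "content": "What's the weather in Rome?"}],
    functions=[{
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }],
)

# Because generation is grammar-constrained, the reply should come back
# as a function_call rather than free text.
print(response.choices[0].message)
```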
main.py
```diff
@@ -285,12 +285,19 @@ def evaluate(user_input, conversation_history = [],re_evaluate=False, agent_acti
         # First we check if it's an object
         if isinstance(function_results, dict) and function_results.get("subtasks") and len(function_results["subtasks"]) > 0:
             # cycle subtasks and execute functions
+            subtask_result=""
             for subtask in function_results["subtasks"]:
                 logger.info("==> subtask: ")
                 logger.info(subtask)
                 #ctr="Context: "+user_input+"\nThought: "+action["reasoning"]+ "\nRequest: "+subtask["reasoning"]
-                cr="Context: "+user_input+"\nRequest: "+subtask["reasoning"]
+                cr="Context: "+user_input+"\n"
+                if subtask_result != "":
+                    # Include cumulative results of previous subtasks
+                    # TODO: this grows context, maybe we should use a different approach or summarize
+                    cr+="Subtask results: "+subtask_result+"\n"
+                cr+="Request: "+subtask["reasoning"]
                 subtask_response, function_results = process_functions(cr, subtask["function"],agent_actions=agent_actions)
+                subtask_result+=process_history(subtask_response)
                 responses.extend(subtask_response)
         if re_evaluate:
             ## Better output or this infinite loops..
```
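This hunk is the "cumulative subtask context" of the commit title: each subtask's output is appended to subtask_result and folded into the prompt of every following subtask, so later steps can see earlier results, at the cost of a prompt that grows with each step (the TODO suggests summarizing instead). A stripped-down sketch of the same pattern in isolation; here `run_subtask` is a hypothetical stand-in for the process_functions/process_history pair:

```python
# Stripped-down sketch of the cumulative subtask loop added above.
# `run_subtask` stands in for process_functions + process_history.
from typing import Callable

def run_subtasks(user_input: str, subtasks: list[dict],
                 run_subtask: Callable[[str, str], str]) -> str:
    """Execute subtasks in order, feeding each one the results so far."""
    subtask_result = ""
    for subtask in subtasks:
        prompt = "Context: " + user_input + "\n"
        if subtask_result != "":
            # Cumulative context: expose earlier subtask output to later
            # steps. The prompt grows with every subtask, which is the
            # growth problem the commit's TODO points at.
            prompt += "Subtask results: " + subtask_result + "\n"
        prompt += "Request: " + subtask["reasoning"]
        subtask_result += run_subtask(prompt, subtask["function"])
    return subtask_result

# Toy usage: an echo-style runner that shows how the context accumulates.
if __name__ == "__main__":
    tasks = [
        {"function": "search", "reasoning": "find the capital of Italy"},
        {"function": "save", "reasoning": "store the answer to a file"},
    ]
    print(run_subtasks("plan a trip", tasks,
                       lambda p, f: f"[{f} ran on {len(p)} chars] "))
```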