Add discord bot, github pipelines

This commit is contained in:
mudler
2023-08-23 00:30:17 +02:00
parent 11514a0e0c
commit d32940e604
8 changed files with 728 additions and 1 deletions

142
.github/workflows/image.yaml vendored Normal file
View File

@@ -0,0 +1,142 @@
---
# Builds and pushes the LocalAGI and discord-bot container images to quay.io.
# Pushes happen only for non-PR events; PRs get a build-only smoke test.
name: 'build container images'
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  localagi:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=quay.io/go-skynet/localagi
          VERSION=main
          SHORTREF=${GITHUB_SHA::8}
          # If this is a git tag, use the tag name as a docker tag
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
          # If the VERSION looks like a version number, assume that
          # this is the most recent version of the image and also
          # tag it 'latest'.
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:latest"
          fi
          # Set output parameters. ::set-output is deprecated and disabled on
          # current runners; write to $GITHUB_OUTPUT instead.
          echo "tags=${TAGS}" >> "$GITHUB_OUTPUT"
          echo "docker_image=${DOCKER_IMAGE}" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
        with:
          platforms: all
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@master
      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_PASSWORD }}
      - name: Build
        if: github.event_name != 'pull_request'
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          push: true
          tags: ${{ steps.prep.outputs.tags }}
      - name: Build PRs
        if: github.event_name == 'pull_request'
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          push: false
          tags: ${{ steps.prep.outputs.tags }}
  discord-localagi:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=quay.io/go-skynet/localagi-discord
          VERSION=main
          SHORTREF=${GITHUB_SHA::8}
          # If this is a git tag, use the tag name as a docker tag
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
          # If the VERSION looks like a version number, assume that
          # this is the most recent version of the image and also
          # tag it 'latest'.
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:latest"
          fi
          # Set output parameters. ::set-output is deprecated and disabled on
          # current runners; write to $GITHUB_OUTPUT instead.
          echo "tags=${TAGS}" >> "$GITHUB_OUTPUT"
          echo "docker_image=${DOCKER_IMAGE}" >> "$GITHUB_OUTPUT"
      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
        with:
          platforms: all
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@master
      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}
          password: ${{ secrets.QUAY_PASSWORD }}
      - name: Build
        if: github.event_name != 'pull_request'
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: ./examples/discord
          file: ./examples/discord/Dockerfile
          platforms: linux/amd64
          push: true
          tags: ${{ steps.prep.outputs.tags }}
      - name: Build PRs
        if: github.event_name == 'pull_request'
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: ./examples/discord
          file: ./examples/discord/Dockerfile
          platforms: linux/amd64
          push: false
          tags: ${{ steps.prep.outputs.tags }}

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
db/
models/
config.ini

View File

@@ -0,0 +1,8 @@
FROM python:3.10-bullseye
WORKDIR /app
# Install dependencies first so this layer caches across code-only changes.
COPY ./requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
COPY . /app
# Exec form. The trailing ';' in the original made the JSON array invalid,
# silently demoting the ENTRYPOINT to shell form (extra /bin/sh -c wrapper).
ENTRYPOINT [ "python", "./main.py" ]

281
examples/discord/agent.py Normal file
View File

@@ -0,0 +1,281 @@
import openai
#from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings import LocalAIEmbeddings
import uuid
import sys
from queue import Queue
import asyncio
import threading
from localagi import LocalAGI
from loguru import logger
from ascii_magic import AsciiArt
from duckduckgo_search import DDGS
from typing import Dict, List
import os
import discord
import openai
import urllib.request
from datetime import datetime
# these three lines swap the stdlib sqlite3 lib with the pysqlite3 package for chroma
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
from langchain.vectorstores import Chroma
from chromadb.config import Settings
import json
import os
from io import StringIO
FILE_NAME_FORMAT = '%Y_%m_%d_%H_%M_%S'
EMBEDDINGS_MODEL = os.environ.get("EMBEDDINGS_MODEL", "all-MiniLM-L6-v2")
EMBEDDINGS_API_BASE = os.environ.get("EMBEDDINGS_API_BASE", "http://api:8080")
PERSISTENT_DIR = os.environ.get("PERSISTENT_DIR", "/data/")
embeddings = LocalAIEmbeddings(model=EMBEDDINGS_MODEL,openai_api_base=EMBEDDINGS_API_BASE)
chroma_client = Chroma(collection_name="memories", persist_directory="/data/db", embedding_function=embeddings)
loop = None
channel = None
def call(thing):
    """Schedule the coroutine *thing* on the bot's event loop and block
    until it completes, returning its result.

    Intended for use from worker threads; relies on the module-level
    ``loop`` being set by the bot before any action runs.
    """
    future = asyncio.run_coroutine_threadsafe(thing, loop)
    return future.result()
def create_image(a, agent_actions={}, localagi=None):
    """Generate an image through the OpenAI-compatible endpoint and post
    it to the current Discord channel.

    *a* is a JSON string with "caption", "width" and "height" keys.
    Returns a short status string for the agent loop.
    """
    request = json.loads(a)
    logger.info(">>> creating image: ")
    logger.info(request["caption"])
    dimensions = f"{request['width']}x{request['height']}"
    response = openai.Image.create(prompt=request["caption"], n=1, size=dimensions)
    image_url = response["data"][0]["url"]
    image_name = download_image(image_url)
    image_path = f"{PERSISTENT_DIR}{image_name}"
    attachment = discord.File(image_path, filename=image_name)
    embed = discord.Embed(title="Generated image")
    embed.set_image(url=f"attachment://{image_name}")
    # Post back onto the bot's event loop from this worker thread.
    call(channel.send(file=attachment, content=f"Here is what I have generated", embed=embed))
    return f"Image created: {response['data'][0]['url']}"
def download_image(url: str):
    """Download *url* into PERSISTENT_DIR under a timestamped .jpg name
    and return just the file name (not the full path)."""
    stamp = datetime.now().strftime(FILE_NAME_FORMAT)
    file_name = f"{stamp}.jpg"
    destination = f"{PERSISTENT_DIR}{file_name}"
    urllib.request.urlretrieve(url, destination)
    return file_name
### Agent capabilities
### These functions are called by the agent to perform actions
###
def save(memory, agent_actions={}, localagi=None):
    """Store a piece of text permanently in the Chroma memory collection.

    *memory* is a JSON string with a "content" key.
    """
    payload = json.loads(memory)
    logger.info(">>> saving to memories: ")
    logger.info(payload["content"])
    # Each memory gets a random UUID as metadata so entries stay distinct.
    chroma_client.add_texts([payload["content"]], [{"id": str(uuid.uuid4())}])
    chroma_client.persist()
    return f"The object was saved permanently to memory."
def search_memory(query, agent_actions={}, localagi=None):
    """Similarity-search the memory store and return a post-processed
    summary of the matching documents.

    *query* is a JSON string with a "reasoning" key used as search text.
    """
    payload = json.loads(query)
    docs = chroma_client.similarity_search(payload["reasoning"])
    parts = ["Memories found in the database:\n"]
    parts.extend("- " + doc.page_content + "\n" for doc in docs)
    # Let the agent condense the raw hits before they re-enter the prompt.
    return localagi.post_process("".join(parts))
# write file to disk with content
def save_file(arg, agent_actions={}, localagi=None):
    """Write (or append) *content* to *filename* under PERSISTENT_DIR and
    share the resulting file in the current Discord channel.

    *arg* is a JSON string with "filename" and "content" keys.
    Returns a status string naming the saved path.
    """
    arg = json.loads(arg)
    filename = arg["filename"]
    content = arg["content"]
    # create persistent dir if does not exist
    if not os.path.exists(PERSISTENT_DIR):
        os.makedirs(PERSISTENT_DIR)
    # write the file in the directory specified
    path = os.path.join(PERSISTENT_DIR, filename)
    # Append if the file already exists so repeated saves accumulate.
    mode = 'a' if os.path.exists(path) else 'w'
    with open(path, mode) as f:
        f.write(content)
    attachment = discord.File(path, filename=filename)
    call(channel.send(file=attachment, content=f"Here is what I have generated"))
    # Bug fix: the original rebound `file` to the discord.File object before
    # this return, so the message showed an object repr instead of the path.
    return f"File {path} saved successfully."
def ddg(query: str, num_results: int, backend: str = "api") -> List[Dict[str, str]]:
    """Run query through DuckDuckGo and return metadata.

    Args:
        query: The query to search for.
        num_results: The number of results to return.
        backend: DDGS backend; "news" results use a different schema.

    Returns:
        A list of dictionaries with the following keys:
            snippet - The description of the result.
            title - The title of the result.
            link - The link to the result.
    """
    ddgs = DDGS()
    try:
        # NOTE(review): num_results is enforced client-side by the break
        # below, not passed to the API call.
        results = ddgs.text(
            query,
            backend=backend,
        )
        if results is None:
            return [{"Result": "No good DuckDuckGo Search Result was found"}]

        def to_metadata(result: Dict) -> Dict[str, str]:
            # The "news" backend returns date/source fields and uses "url"
            # instead of "href" for the link.
            if backend == "news":
                return {
                    "date": result["date"],
                    "title": result["title"],
                    "snippet": result["body"],
                    "source": result["source"],
                    "link": result["url"],
                }
            return {
                "snippet": result["body"],
                "title": result["title"],
                "link": result["href"],
            }

        formatted_results = []
        for i, res in enumerate(results, 1):
            if res is not None:
                formatted_results.append(to_metadata(res))
                # Stop once we have collected num_results entries.
                if len(formatted_results) == num_results:
                    break
    except Exception as e:
        # Best-effort: log and return an empty result set on any failure.
        logger.error(e)
        return []
    return formatted_results
## Search on duckduckgo
def search_duckduckgo(a, agent_actions={}, localagi=None):
    """Search the internet via DuckDuckGo and return the top hits as text.

    *a* is a JSON string with a "query" key. Returns one line per result:
    "link: title snippet".
    """
    a = json.loads(a)
    # Fix: the original bound this to `list`, shadowing the builtin.
    results = ddg(a["query"], 2)
    text_res = ""
    for doc in results:
        text_res += f"""{doc["link"]}: {doc["title"]} {doc["snippet"]}\n"""
    return text_res
### End Agent capabilities
###
### Agent action definitions
# Registry of agent capabilities: maps an action name to its implementation
# ("function"), whether the planner may schedule it ("plannable"), a natural
# language hint for the LLM ("description"), and the JSON-schema style
# function-calling signature ("signature").
agent_actions = {
    "create_image": {
        "function": create_image,
        "plannable": True,
        "description": 'If the user wants to generate an image, the assistant replies with "create_image", a detailed caption, the width and height of the image to generate.',
        "signature": {
            "name": "create_image",
            "parameters": {
                "type": "object",
                "properties": {
                    "caption": {
                        "type": "string",
                    },
                    "width": {
                        "type": "number",
                    },
                    "height": {
                        "type": "number",
                    },
                },
            }
        },
    },
    "search_internet": {
        "function": search_duckduckgo,
        "plannable": True,
        "description": 'For searching the internet with a query, the assistant replies with the action "search_internet" and the query to search.',
        "signature": {
            "name": "search_internet",
            "description": """For searching internet.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        # Fixed: was the copy-pasted "information to save".
                        "description": "the query to search for"
                    },
                },
            }
        },
    },
    "save_file": {
        "function": save_file,
        "plannable": True,
        "description": 'The assistant replies with the action "save_file", the filename and content to save for writing a file to disk permanently. This can be used to store the result of complex actions locally.',
        "signature": {
            "name": "save_file",
            "description": """For saving a file to disk with content.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "filename": {
                        "type": "string",
                        # Fixed: was the copy-pasted "information to save".
                        "description": "name of the file to write"
                    },
                    "content": {
                        "type": "string",
                        # Fixed: was the copy-pasted "information to save".
                        "description": "the content to write to the file"
                    },
                },
            }
        },
    },
    "save_memory": {
        "function": save,
        "plannable": True,
        "description": 'The assistant replies with the action "save_memory" and the string to remember or store an information that thinks it is relevant permanently.',
        "signature": {
            "name": "save_memory",
            "description": """Save or store informations into memory.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "content": {
                        "type": "string",
                        "description": "information to save"
                    },
                },
                "required": ["content"]
            }
        },
    },
    "search_memory": {
        "function": search_memory,
        "plannable": True,
        "description": 'The assistant replies with the action "search_memory" for searching between its memories with a query term.',
        "signature": {
            "name": "search_memory",
            "description": """Search in memory""",
            "parameters": {
                "type": "object",
                "properties": {
                    "reasoning": {
                        "type": "string",
                        "description": "reasoning behind the intent"
                    },
                },
                "required": ["reasoning"]
            }
        },
    },
}

View File

@@ -0,0 +1,21 @@
[discord]
; Guild (server) ID the slash commands are registered against.
server_id =
; Discord bot token.
api_key =

[openai]
; Credentials for the OpenAI-compatible API (LocalAI accepts placeholders).
organization = fff
api_key = sl-d-d-d

[settings]
default_size = 1024x1024
; Local directory where generated images are stored.
file_path = images/
; strftime pattern used to name downloaded files.
file_name_format = %Y_%m_%d_%H_%M_%S

[agent]
llm_model = gpt-4
tts_model = en-us-kathleen-low.onnx
tts_api_base = http://api:8080
functions_model = functions
api_base = http://api:8080
stablediffusion_api_base = http://api:8080
stablediffusion_model = stablediffusion

6
examples/discord/entrypoint.sh Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Abort immediately if any setup step fails instead of starting a broken bot.
set -e
# Reinstall hnswlib so the compiled wheel matches this container's ABI.
pip uninstall hnswlib chromadb-hnswlib -y
pip install hnswlib chromadb-hnswlib
cd /app
# exec replaces the shell so python runs as PID 1 and receives signals
# (SIGTERM on container stop) directly.
exec python3 /app/main.py

260
examples/discord/main.py Normal file
View File

@@ -0,0 +1,260 @@
"""
This is a discord bot for generating images using OpenAI's DALL-E
Author: Stefan Rial
YouTube: https://youtube.com/@StefanRial
GitHub: https://github.com/StefanRial/ClaudeBot
E-Mail: mail.stefanrial@gmail.com
"""
import discord
import openai
import urllib.request
import os
from datetime import datetime
from configparser import ConfigParser
from queue import Queue
import agent
from agent import agent_actions
from localagi import LocalAGI
import asyncio
import threading
from discord import app_commands
import functools
import typing
config_file = "config.ini"
# interpolation=None so raw '%' in values (strftime patterns) is preserved.
config = ConfigParser(interpolation=None)
config.read(config_file)

# Credentials and settings from config.ini. The original wrapped the literal
# keys in redundant str(...) calls; they are already strings.
SERVER_ID = config["discord"]["server_id"]
DISCORD_API_KEY = config["discord"]["api_key"]
OPENAI_ORG = config["openai"]["organization"]
OPENAI_API_KEY = config["openai"]["api_key"]
FILE_PATH = config["settings"]["file_path"]
FILE_NAME_FORMAT = config["settings"]["file_name_format"]

SIZE_LARGE = "1024x1024"
SIZE_MEDIUM = "512x512"
SIZE_SMALL = "256x256"
SIZE_DEFAULT = config["settings"]["default_size"]

GUILD = discord.Object(id=SERVER_ID)

# Ensure the image output directory exists before the bot starts.
if not os.path.isdir(FILE_PATH):
    os.mkdir(FILE_PATH)
class Client(discord.Client):
    """discord.Client subclass that carries an application-command tree."""

    def __init__(self, *, intents: discord.Intents):
        super().__init__(intents=intents)
        # Slash-command tree; commands are registered against this instance.
        self.tree = app_commands.CommandTree(self)

    async def setup_hook(self):
        # Copy global commands into the configured guild and sync so they
        # appear immediately (guild sync is faster than global propagation).
        self.tree.copy_global_to(guild=GUILD)
        await self.tree.sync(guild=GUILD)
# The bot needs message and message-content intents to read conversations.
claude_intents = discord.Intents.default()
claude_intents.messages = True
claude_intents.message_content = True
client = Client(intents=claude_intents)

openai.organization = OPENAI_ORG
openai.api_key = OPENAI_API_KEY
# Startup sanity check: fails fast if the API endpoint/key is unusable.
openai.Model.list()
async def close_thread(thread: discord.Thread):
    """Rename, notify, and archive a thread whose context limit was reached."""
    await thread.edit(name="closed")
    notice = discord.Embed(
        description="**Thread closed** - Context limit reached, closing...",
        color=discord.Color.blue(),
    )
    await thread.send(embed=notice)
    # Lock as well as archive so nobody can reopen it by replying.
    await thread.edit(archived=True, locked=True)
@client.event
async def on_ready():
    # Fired once the gateway connection is established.
    print(f"We have logged in as {client.user}")
def run_localagi_thread_history(history, message, thread, loop):
    """Run LocalAGI in a worker thread, replying into an existing thread.

    *history* is the prior conversation as chat-format dicts; progress is
    streamed by repeatedly editing a single status message in *thread*.
    """
    def call(thing):
        # Bridge from this worker thread back onto the bot's event loop.
        return asyncio.run_coroutine_threadsafe(thing, loop).result()
    sent_message = call(thread.send(f"⚙️ LocalAGI starts"))
    user = message.author
    def action_callback(name, parameters):
        # Surface each function call by editing the status message.
        call(sent_message.edit(content=f"⚙️ Calling function '{name}' with {parameters}"))
    def reasoning_callback(name, reasoning):
        call(sent_message.edit(content=f"🤔 I'm thinking... '{reasoning}' (calling '{name}'), please wait.."))
    # One LocalAGI instance per request, configured entirely from config.ini.
    localagi = LocalAGI(
        agent_actions=agent_actions,
        llm_model=config["agent"]["llm_model"],
        tts_model=config["agent"]["tts_model"],
        action_callback=action_callback,
        reasoning_callback=reasoning_callback,
        tts_api_base=config["agent"]["tts_api_base"],
        functions_model=config["agent"]["functions_model"],
        api_base=config["agent"]["api_base"],
        stablediffusion_api_base=config["agent"]["stablediffusion_api_base"],
        stablediffusion_model=config["agent"]["stablediffusion_model"],
    )
    # remove bot ID from the message content
    message.content = message.content.replace(f"<@{client.user.id}>", "")
    conversation_history = localagi.evaluate(
        message.content,
        history,
        subtaskContext=True,
    )
    # Final answer: edit the status message to mention the user with the reply.
    call(sent_message.edit(content=f"<@{user.id}> {conversation_history[-1]['content']}"))
def run_localagi_message(message, loop):
    """Run LocalAGI in a worker thread for a one-shot channel mention.

    Unlike run_localagi_thread_history, no prior history is supplied.
    """
    def call(thing):
        # Bridge from this worker thread back onto the bot's event loop.
        return asyncio.run_coroutine_threadsafe(thing, loop).result()
    sent_message = call(message.channel.send(f"⚙️ LocalAGI starts"))
    user = message.author
    def action_callback(name, parameters):
        # Surface each function call by editing the status message.
        call(sent_message.edit(content=f"⚙️ Calling function '{name}' with {parameters}"))
    def reasoning_callback(name, reasoning):
        call(sent_message.edit(content=f"🤔 I'm thinking... '{reasoning}' (calling '{name}'), please wait.."))
    # One LocalAGI instance per request, configured entirely from config.ini.
    localagi = LocalAGI(
        agent_actions=agent_actions,
        llm_model=config["agent"]["llm_model"],
        tts_model=config["agent"]["tts_model"],
        action_callback=action_callback,
        reasoning_callback=reasoning_callback,
        tts_api_base=config["agent"]["tts_api_base"],
        functions_model=config["agent"]["functions_model"],
        api_base=config["agent"]["api_base"],
        stablediffusion_api_base=config["agent"]["stablediffusion_api_base"],
        stablediffusion_model=config["agent"]["stablediffusion_model"],
    )
    # remove bot ID from the message content
    message.content = message.content.replace(f"<@{client.user.id}>", "")
    conversation_history = localagi.evaluate(
        message.content,
        [],
        subtaskContext=True,
    )
    # Final answer: edit the status message to mention the user with the reply.
    call(sent_message.edit(content=f"<@{user.id}> {conversation_history[-1]['content']}"))
def run_localagi(interaction, prompt, loop):
    """Handle the /localai slash command in a worker thread.

    Acknowledges the interaction with an embed, spawns a public thread off
    the response, and streams LocalAGI progress into that thread.
    """
    def call(thing):
        # Bridge from this worker thread back onto the bot's event loop.
        return asyncio.run_coroutine_threadsafe(thing, loop).result()
    user = interaction.user
    embed = discord.Embed(
        description=f"<@{user.id}> wants to chat! 🤖💬",
        color=discord.Color.green(),
    )
    embed.add_field(name=user.name, value=prompt)
    call(interaction.response.send_message(embed=embed))
    response = call(interaction.original_response())
    # create the thread
    thread = call(response.create_thread(
        name=prompt,
        slowmode_delay=1,
        reason="gpt-bot",
        auto_archive_duration=60,
    ))
    # NOTE(review): typing() returns an async context manager; it is neither
    # awaited nor entered here, so it likely has no effect — confirm intent.
    thread.typing()
    sent_message = call(thread.send(f"⚙️ LocalAGI starts"))
    messages = []
    def action_callback(name, parameters):
        # Surface each function call by editing the status message.
        call(sent_message.edit(content=f"⚙️ Calling function '{name}' with {parameters}"))
    def reasoning_callback(name, reasoning):
        call(sent_message.edit(content=f"🤔 I'm thinking... '{reasoning}' (calling '{name}'), please wait.."))
    # One LocalAGI instance per request, configured entirely from config.ini.
    localagi = LocalAGI(
        agent_actions=agent_actions,
        llm_model=config["agent"]["llm_model"],
        tts_model=config["agent"]["tts_model"],
        action_callback=action_callback,
        reasoning_callback=reasoning_callback,
        tts_api_base=config["agent"]["tts_api_base"],
        functions_model=config["agent"]["functions_model"],
        api_base=config["agent"]["api_base"],
        stablediffusion_api_base=config["agent"]["stablediffusion_api_base"],
        stablediffusion_model=config["agent"]["stablediffusion_model"],
    )
    # remove bot ID from the message content
    prompt = prompt.replace(f"<@{client.user.id}>", "")
    conversation_history = localagi.evaluate(
        prompt,
        messages,
        subtaskContext=True,
    )
    # Final answer: edit the status message to mention the user with the reply.
    call(sent_message.edit(content=f"<@{user.id}> {conversation_history[-1]['content']}"))
@client.tree.command()
@app_commands.describe(prompt="Ask me anything!")
async def localai(interaction: discord.Interaction, prompt: str):
    """Slash command entry point: run LocalAGI on *prompt* off the event loop."""
    loop = asyncio.get_running_loop()
    # Share the loop and channel with the agent module so its actions
    # (image posts, file uploads) can send messages back.
    agent.loop = loop
    agent.channel = interaction.channel
    # Run in a worker thread so the gateway connection stays responsive.
    threading.Thread(target=run_localagi, args=[interaction, prompt,loop]).start()
# https://github.com/openai/gpt-discord-bot/blob/1161634a59c6fb642e58edb4f4fa1a46d2883d3b/src/utils.py#L15
def discord_message_to_message(message):
    """Convert a Discord message into a chat-format dict, or None if empty.

    Thread-starter messages carry their prompt in the first embed field of
    the cached referenced message; ordinary messages use their content.
    """
    is_thread_starter = (
        message.type == discord.MessageType.thread_starter_message
        and message.reference.cached_message
        and len(message.reference.cached_message.embeds) > 0
        and len(message.reference.cached_message.embeds[0].fields) > 0
    )
    if is_thread_starter:
        field = message.reference.cached_message.embeds[0].fields[0]
        if field.value:
            return { "role": "user", "content": field.value }
    elif message.content:
        return { "role": "user", "content": message.content }
    return None
@client.event
async def on_message(message):
    """Route incoming messages: a mention outside a thread starts a fresh
    run; replies inside a bot-owned thread continue with history."""
    # ignore messages from the bot
    if message.author == client.user:
        return
    loop = asyncio.get_running_loop()
    agent.loop = loop
    # ignore messages not in a thread
    channel = message.channel
    agent.channel = channel
    # Mention outside a thread: one-shot run with no prior history.
    if not isinstance(channel, discord.Thread) and client.user.mentioned_in(message):
        threading.Thread(target=run_localagi_message, args=[message,loop]).start()
        return
    if not isinstance(channel, discord.Thread):
        return
    # ignore threads not created by the bot
    thread = channel
    if thread.owner_id != client.user.id:
        return
    if thread.message_count > 5:
        # too many messages, no longer going to reply
        await close_thread(thread=thread)
        return
    # Collect up to 5 messages of history (newest first), drop empties,
    # then reverse into chronological order.
    channel_messages = [
        discord_message_to_message(message)
        async for message in thread.history(limit=5)
    ]
    channel_messages = [x for x in channel_messages if x is not None]
    channel_messages.reverse()
    # The last entry is the triggering message itself; pass the rest as context.
    threading.Thread(target=run_localagi_thread_history, args=[channel_messages[:-1],message,thread,loop]).start()

client.run(DISCORD_API_KEY)

View File

@@ -0,0 +1,8 @@
discord
openai
git+https://github.com/mudler/LocalAGI
ascii-magic
loguru
duckduckgo_search
chromadb
pysqlite3-binary