import discord
import aiohttp
import ast
import os
import threading
intents = discord.Intents.default()
intents.message_content = True
bot = discord.Bot(intents=intents)
token = os.environ.get('TOKEN_DISCORD')
class Like_Dislike(discord.ui.View):
    @discord.ui.button(style=discord.ButtonStyle.primary, emoji="👍")
    async def like_button(self, button, interaction):
        await interaction.response.send_message("You liked the response")

    @discord.ui.button(style=discord.ButtonStyle.primary, emoji="👎")
    async def dislike_button(self, button, interaction):
        await interaction.response.send_message("You disliked the response")
@bot.event
async def on_ready():
    print(f"{bot.user} is ready and online!")
@bot.slash_command(name="help", description="List of commands and other info.")
async def help(ctx: discord.ApplicationContext):
    await ctx.respond(
        "Hello! FURY Bot responds to all your messages:"
        "\n1) inside the forum channel, and"
        "\n2) those that mention the bot."
    )
async def llm_output(question: str, context: str) -> str:
    """
    Returns the LLM answer for the given user question and retrieved context.
    """
    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
    # URL_LLM = 'http://localhost:11434'  # NOTE: FOR TESTING
    prompt = f"""
    You are a senior FURY developer. FURY is a high-level Python graphics API similar to VTK.
    Question: {question}
    Context: {context}
    """
    obj = {
        'model': 'llama3-70b-8192',
        'prompt': prompt,
        'stream': False
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(URL_LLM + "/api/generate", json=obj) as response:
            response_json = await response.json()
            return response_json['choices'][0]['message']['content']
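
# NOTE: the parsing above assumes the endpoint returns an OpenAI-style
# chat-completion payload, e.g. {"choices": [{"message": {"content": "..."}}]}.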

async def embedding_output(message: str) -> list:
    """
    Returns the embedding for the given message.
    rtype: list of floats; length depends on the model.
    """
    URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
    async with aiohttp.ClientSession() as session:
        async with session.post(URL_EMBEDDING + "/embedding", json={"text": message}) as response:
            response_json = await response.json(content_type=None)
            return response_json['output']
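
# NOTE: this assumes the endpoint replies with {"output": [...]}, where the
# list is the embedding vector itself.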

async def db_output(embedding: list) -> dict:
    """
    Returns the KNN results for the given embedding.
    rtype: dict (parsed JSON)
    """
    URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
    async with aiohttp.ClientSession() as session:
        async with session.post(URL_DB + "/query", json={"embeddings": embedding}) as response:
            response_json = await response.json()
            return response_json
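
# NOTE: on_message below assumes the KNN response has the shape
# {"matches": [{"metadata": {"data": "..."}}, ...]}.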

@bot.event
async def on_message(message):
    """
    Replies with the LLM answer, generated from the retrieved context.
    """
    # Ignore the bot's own messages and messages that don't mention it.
    if (message.author == bot.user) or not bot.user.mentioned_in(message):
        return

    print(message.content)
    await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")

    question = message.content.replace("<@1243428204124045385>", "")
    embedding: list = await embedding_output(question)
    db_knn: dict = await db_output(embedding)

    # Collect the KNN matches into a context string and build the reference list.
    db_context = ""
    references = ""
    for i in range(len(db_knn['matches'])):
        data = db_knn['matches'][i]['metadata']['data']
        db_context += (data + "\n")
        data = ast.literal_eval(data)
        references += ("<https://github.com/fury-gl/fury/tree/master/" + data['path'] + ">")
        if data.get("function_name"):
            references += f"\tFunction Name: {data.get('function_name')}"
        else:
            references += f"\tClass Name: {data.get('class_name')}"
        references += "\n"

    llm_answer: str = await llm_output(question, db_context)  # currently answers from the top KNN results only; TODO: make this better
    try:
        await message.reply(content=llm_answer[:1990], view=Like_Dislike())  # TODO: handle large responses (>2000 characters)
        await message.reply(content=f"**References**\n{references}")
    except Exception as e:  # TODO: make exception handling better
        print(e)
        await message.reply("An error occurred. Please try again.")

def run_bot():
    bot.run(token)
# ===========================================================================================================================================================
from flask import Flask

app = Flask(__name__)

# Minimal Flask app so the Space exposes an HTTP endpoint while the bot runs
# in a background thread.
@app.route("/")
def home():
    return "The bot is online."

threading.Thread(target=run_bot).start()
app.run()