Commit a06b399
Parent(s): cdbd107

moved logic to engine

main.py CHANGED
@@ -33,65 +33,23 @@ async def help(ctx: discord.ApplicationContext):
 \n2)Those that tag the bot.")


-async def llm_output(question: str, context: str):
-    """
-    Returns output from the LLM using the given user-question and retrived context
-    """
-
-    URL_LLM = 'https://robinroy03-fury-bot.hf.space'
-    # URL_LLM = 'http://localhost:11434' # NOTE: FOR TESTING
+async def llm_output(question: str) -> tuple[str, str]:

-    prompt = f"""
-    You are a senior FURY developer. FURY is a high level python graphics API similar to VTK.
-
-    Question: {question}
-
-    Context: {context}
-    """
+    URL_LLM = 'https://robinroy03-fury-engine.hf.space'
     obj = {
-
-
-
-
+        "query": question,
+        "llm": "llama3-70b-8192",
+        "knn": 3,
+        "stream": False
+    }

     async with aiohttp.ClientSession() as session:
         async with session.post(URL_LLM + "/api/generate", json=obj) as response:
             if response.status == 500:
-                return "
+                return "Error 500"
             response_json = await response.json()

-            return response_json['
-
-async def embedding_output(message: str) -> list:
-    """
-    Returns embeddings for the given message
-
-    rtype: list of embeddings. Length depends on the model.
-    """
-
-    URL_EMBEDDING = 'https://robinroy03-fury-embeddings-endpoint.hf.space'
-
-    async with aiohttp.ClientSession() as session:
-        async with session.post(URL_EMBEDDING + "/embedding", json={"text": message}) as response:
-            response_json = await response.json(content_type=None)
-
-    return response_json['output']
-
-
-async def db_output(embedding: list) -> dict:
-    """
-    Returns the KNN results.
-
-    rtype: JSON
-    """
-
-    URL_DB = 'https://robinroy03-fury-db-endpoint.hf.space'
-
-    async with aiohttp.ClientSession() as session:
-        async with session.post(URL_DB + "/query", json={"embeddings": embedding}) as response:
-            response_json = await response.json()
-
-    return response_json


 @bot.event
@@ -107,28 +65,9 @@ async def on_message(message):
     await message.reply(content="Your message was received, it'll take around 30 seconds for FURY to process an answer.")

     question = message.content.replace("<@1243428204124045385>", "")
-    embedding: list = await embedding_output(question)
-
-
-    db_knn: dict = await db_output(embedding)
-    db_context = ""
-    references = ""
-    for i in range(len(db_knn['matches'])):
-        data = db_knn['matches'][i]['metadata']['data']
-        db_context += (data + "\n")
-        data = ast.literal_eval(data)
-        references += ("<https://github.com/fury-gl/fury/tree/master/" + data['path'] + ">").replace("//home/robin/Desktop/l/fury", "")
-        if data.get("function_name"):
-            references += f"\tFunction Name: {data.get('function_name')}"
-        elif data.get("class_name"):
-            references += f"\tClass Name: {data.get('class_name')}"
-        elif data['type'] == 'rst':
-            references += f"\tDocumentation: {data['path'].split("/")[-1]}"
-        elif data['type'] == 'documentation_examples':
-            references += f"\tDocumentation: {data['path'].split("/")[-1]}"
-        references += "\n"
-
-    llm_answer: str = await llm_output(question, db_context)
+    engine_response = await llm_output(question)
+    if engine_response != "Error 500":
+        llm_answer, references = engine_response

     try:
         start = 0
@@ -141,7 +80,7 @@ async def on_message(message):
             await message.reply(content=f"**References**\n{references}", view=Like_Dislike())
     except Exception as e:  # TODO: make exception handling better
         print(e)
-        await message.reply("An error occurred. Retry again.")
+        await message.reply("An error occurred. Retry again. Try different prompts.")

 def run_bot():
     bot.run(token)