Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,135 +1,137 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import pymongo
|
3 |
-
import certifi
|
4 |
-
from llama_index.core import VectorStoreIndex
|
5 |
-
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
6 |
-
from llama_index.llms.groq import Groq
|
7 |
-
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
|
8 |
-
from llama_index.core.prompts import PromptTemplate
|
9 |
-
from dotenv import load_dotenv
|
10 |
-
import os
|
11 |
-
import base64
|
12 |
-
import markdown as md
|
13 |
-
|
14 |
-
# Load environment variables
|
15 |
-
load_dotenv()
|
16 |
-
|
17 |
-
# --- MongoDB Config ---
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
-
|
42 |
-
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
)
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
)
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
with gr.
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
title="ποΈ RamayanaGPT",
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import pymongo
|
3 |
+
import certifi
|
4 |
+
from llama_index.core import VectorStoreIndex
|
5 |
+
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
6 |
+
from llama_index.llms.groq import Groq
|
7 |
+
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
|
8 |
+
from llama_index.core.prompts import PromptTemplate
|
9 |
+
from dotenv import load_dotenv
|
10 |
+
import os
|
11 |
+
import base64
|
12 |
+
import markdown as md
|
13 |
+
|
14 |
+
# Load environment variables
# (reads a local .env file if present; secrets come from the Space's env otherwise)
load_dotenv()

# --- MongoDB Config ---
# Connection string is kept out of source; the rest name the pre-built
# Atlas database/collection and its vector search index.
ATLAS_CONNECTION_STRING = os.getenv("ATLAS_CONNECTION_STRING")
DB_NAME = "RAG"
COLLECTION_NAME = "ramayana"
VECTOR_INDEX_NAME = "ramayana_vector_index"
22 |
+
|
23 |
+
# --- Embedding Model ---
# Used to embed queries at retrieval time.
# NOTE(review): presumably the same model that produced the stored vectors in
# the `ramayana` collection — confirm, as a mismatch breaks vector search.
embed_model = HuggingFaceEmbedding(model_name="intfloat/multilingual-e5-base")
|
25 |
+
|
26 |
+
# --- Prompt Template ---
# LlamaIndex fills {context_str} with the retrieved passages and {query_str}
# with the user's question before sending the prompt to the LLM.
ramayana_qa_template = PromptTemplate(
    """You are an expert on the Valmiki Ramayana and a guide who always inspires people with the great Itihasa like the Ramayana.

Below is text from the epic, including shlokas and their explanations:
---------------------
{context_str}
---------------------

Using only this information, answer the following query.

Query: {query_str}

Answer:
- Intro or general description to ```Query```
- Related shloka/shlokas followed by its explanation
- Overview of ```Query```
"""
)
|
45 |
+
|
46 |
+
# --- Connect to MongoDB once at startup ---
def get_vector_index_once():
    """Connect to MongoDB Atlas and build a VectorStoreIndex over the
    pre-populated Ramayana collection.

    Returns:
        VectorStoreIndex: index backed by the Atlas vector search index,
        using the module-level ``embed_model`` for query embeddings.

    Raises:
        pymongo.errors.PyMongoError: if the cluster is unreachable —
        ``server_info()`` forces a round-trip so bad credentials or
        networking fail fast at startup instead of on the first query.
    """
    mongo_client = pymongo.MongoClient(
        ATLAS_CONNECTION_STRING,
        tlsCAFile=certifi.where(),  # certifi CA bundle for TLS verification
        tlsAllowInvalidCertificates=False,
        connectTimeoutMS=30000,
        serverSelectionTimeoutMS=30000,
    )
    # Force an immediate round-trip so connection problems surface here.
    mongo_client.server_info()
    print("✅ Connected to MongoDB Atlas.")  # fixed: log glyph was mojibake-garbled

    vector_store = MongoDBAtlasVectorSearch(
        mongo_client,
        db_name=DB_NAME,
        collection_name=COLLECTION_NAME,
        vector_index_name=VECTOR_INDEX_NAME,
    )
    return VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
|
65 |
+
|
66 |
+
# Connect once at import time so every chat request reuses the same index
# and MongoDB connection instead of reconnecting per message.
vector_index = get_vector_index_once()
|
68 |
+
|
69 |
+
# --- Respond Function (uses API key from state) ---
def chat_with_groq(message, history, groq_key):
    """Answer one chat message via RAG over the Ramayana vector index.

    Args:
        message: The user's question.
        history: Chat history supplied by gr.ChatInterface (unused; each
            query is answered statelessly from retrieval).
        groq_key: The user's Groq API key, taken from session state.

    Returns:
        str: the LLM's answer, or a prompt to supply a key if none was given.
    """
    # Fail fast with a friendly message instead of an opaque Groq auth error.
    if not groq_key:
        return "Please enter your Groq API key first."

    # Build the LLM per request so each user's own key is used for their calls.
    llm = Groq(model="llama-3.1-8b-instant", api_key=groq_key)

    query_engine = vector_index.as_query_engine(
        llm=llm,
        text_qa_template=ramayana_qa_template,
        similarity_top_k=5,  # retrieve the 5 nearest chunks as context
        verbose=True,
    )

    response = query_engine.query(message)
    return str(response)
|
82 |
+
|
83 |
+
def encode_image(image_path):
    """Return the Base64 encoding (as UTF-8 text) of the file at *image_path*."""
    with open(image_path, "rb") as image_fh:
        raw_bytes = image_fh.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
|
86 |
+
|
87 |
+
# Encode the images
# Base64 strings are interpolated into the footer HTML (md.footer) as inline images.
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
|
91 |
+
|
92 |
+
# --- Gradio UI ---
# Two tabs: an intro page and the chat itself. The chat tab first shows a
# key-entry form; on "Start Chat" the form is hidden and the chat is revealed.
with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as demo:
    with gr.Tabs():
        with gr.TabItem("Intro"):
            # Landing tab: project description rendered from the local markdown module.
            gr.Markdown(md.description)

        with gr.TabItem("GPT"):
            # Key-entry UI; the whole column is hidden once chat starts.
            with gr.Column(visible=True) as accordion_container:
                with gr.Accordion("How to get Groq API KEY", open=False):
                    gr.Markdown(md.groq_api_key)

                groq_key_box = gr.Textbox(
                    label="Enter Groq API Key",
                    type="password",
                    placeholder="Paste your Groq API key here..."
                )

                start_btn = gr.Button("Start Chat")

            # Per-session storage for the key; forwarded to chat_with_groq
            # on every message via additional_inputs below.
            groq_state = gr.State(value="")

            # Chat container, initially hidden
            with gr.Column(visible=False) as chatbot_container:
                chatbot = gr.ChatInterface(
                    fn=lambda message, history, groq_key: chat_with_groq(message, history, groq_key),
                    additional_inputs=[groq_state],
                    chatbot=gr.Chatbot(height=500),
                    title="ποΈ RamayanaGPT",  # NOTE(review): title text appears mojibake-garbled — confirm intended emoji
                    show_progress="full",
                    fill_height=True,
                    # description="Ask questions from the Valmiki Ramayana. Powered by RAG + MongoDB + LlamaIndex.",
                )

            # Show chat and hide inputs
            def save_key_and_show_chat(key):
                # Returns (stored key, hide key box, hide start button,
                # hide key column, show chat column) — order matches `outputs`.
                return key, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)

            start_btn.click(
                fn=save_key_and_show_chat,
                inputs=[groq_key_box],
                outputs=[groq_state, groq_key_box, start_btn, accordion_container, chatbot_container]
            )
    # Footer with inline Base64-encoded logos (see encode_image above).
    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
|
135 |
+
|
136 |
+
# Launch the Gradio app only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
|