# app.py — Deep Research Engine (Gradio + Hugging Face ZeroGPU demo)
import gradio as gr
import spaces # Required for ZeroGPU
from transformers import pipeline
from duckduckgo_search import DDGS
from datetime import datetime
import re # Added for regular expressions
# Initialize a lightweight text generation model on CPU.
# distilgpt2 is small enough to load at import time; GPU time is only
# consumed inside the @spaces.GPU-decorated generate_answer below.
generator = pipeline("text-generation", model="distilgpt2", device=-1) # -1 ensures CPU
# Web search function (CPU-based)
def get_web_results(query: str, max_results: int = 3) -> list:
    """Fetch web results synchronously for Zero GPU compatibility.

    Args:
        query: Search query string.
        max_results: Maximum number of DuckDuckGo results to return.

    Returns:
        A list of dicts with "title", "snippet" and "url" keys. On any
        failure a single placeholder entry describing the error is
        returned instead of raising, so the UI always has something to
        render (deliberate best-effort behavior).
    """
    try:
        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=max_results))
        # Use .get() for every field: DDGS result dicts are not guaranteed
        # to contain "body"/"href", and a KeyError here would be reported
        # as a generic fetch failure.
        return [
            {
                "title": r.get("title", "No Title"),
                "snippet": r.get("body", ""),
                "url": r.get("href", "#"),
            }
            for r in results
        ]
    except Exception as e:
        return [{"title": "Error", "snippet": f"Failed to fetch results: {str(e)}", "url": "#"}]
# Format prompt for the AI model (CPU-based) - IMPROVED
def format_prompt(query: str, web_results: list) -> str:
    """Build the model prompt: timestamp, query, numbered web context,
    and an instruction to answer concisely with bracketed citations."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    lines = [f"Time: {timestamp}", f"Query: {query}", "Web Context:"]
    # Number sources from 1 so the model's citations match the source list.
    for idx, item in enumerate(web_results, 1):
        lines.append(f"- [{idx}] {item['title']}: {item['snippet']}")
    lines.append("")  # blank separator between context and instruction
    lines.append(
        "Provide a concise answer in markdown format. Cite relevant sources "
        "using the bracketed numbers provided (e.g., [1], [2]). Focus on "
        "direct answers. If the context doesn't contain the answer, say that "
        "the information wasn't found in the provided sources."
    )
    return "\n".join(lines)
# GPU-decorated answer generation - IMPROVED
@spaces.GPU(duration=120) # Allow up to 120 seconds of GPU time
def generate_answer(prompt: str, web_results: list) -> str:
    """Generate the research answer and rewrite bracketed citations.

    Runs the module-level `generator` pipeline on `prompt`, then turns
    each plain citation "[i]" (for i = 1..len(web_results)) into a
    markdown link "[^i^](#i)" pointing at the source anchors emitted by
    format_sources.
    """
    outputs = generator(
        prompt,
        max_new_tokens=150,
        num_return_sequences=1,
        truncation=True,
        return_full_text=False,
    )
    text = outputs[0]["generated_text"].strip()
    # Only rewrite citations that can actually resolve to a source.
    for idx, _ in enumerate(web_results, 1):
        text = text.replace(f"[{idx}]", f"[^{idx}^](#{idx})")
    return text
# Format sources for display (CPU-based) - IMPROVED
def format_sources(web_results: list) -> str:
    """Create an HTML list of sources with per-source anchors.

    Args:
        web_results: List of dicts with "title", "snippet" and "url"
            keys (as produced by get_web_results).

    Returns:
        An HTML string; each source gets an element with id=<n> so the
        "[^n^](#n)" citation links from generate_answer can jump to it.
        Title, snippet and URL come from untrusted web content, so all
        three are HTML-escaped before interpolation.
    """
    import html  # stdlib; local import keeps the module's top unchanged

    if not web_results:
        return "<div>No sources available</div>"
    sources_html = "<div class='sources-list'>"
    for i, res in enumerate(web_results, 1):
        title = html.escape(res['title'])
        url = html.escape(res['url'])
        snippet = res['snippet']
        # Truncate long snippets; only add an ellipsis when text was cut.
        display = html.escape(snippet[:100]) + ("..." if len(snippet) > 100 else "")
        sources_html += f"""
        <div class='source-item' id='{i}'>
            <span class='source-number'>[{i}]</span>
            <a href='{url}' target='_blank'>{title}</a>: {display}
        </div>
        """
    sources_html += "</div>"
    return sources_html
# Main processing function - IMPROVED
def process_deep_research(query: str, history: list):
    """Run the full research pipeline for one query.

    Fetches web results (CPU), builds the prompt and generates an answer
    (GPU via generate_answer), renders the source list, and appends a
    [query, answer+sources] pair to the chat history.

    Returns:
        (answer, sources_html, new_history) — new_history is a fresh
        list; the incoming history is not mutated.
    """
    results = get_web_results(query)
    answer = generate_answer(format_prompt(query, results), results)
    sources_html = format_sources(results)
    # Gradio Chatbot expects [[user_msg, bot_msg], ...] pairs.
    new_history = history + [[query, f"{answer}\n\n{sources_html}"]]
    return answer, sources_html, new_history
# Custom CSS - Slightly adjusted for better spacing
css = """
body {
font-family: 'Arial', sans-serif;
background: #1a1a1a;
color: #ffffff;
}
.gradio-container {
max-width: 900px;
margin: 0 auto;
padding: 15px;
}
.header {
text-align: center;
padding: 15px;
background: linear-gradient(135deg, #2c3e50, #3498db);
border-radius: 8px;
margin-bottom: 15px;
}
.header h1 { font-size: 2em; margin: 0; color: #ffffff; }
.header p { color: #bdc3c7; font-size: 1em; }
.search-box {
background: #2c2c2c;
padding: 10px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
}
.search-box input {
background: #3a3a3a !important;
color: #ffffff !important;
border: none !important;
border-radius: 5px !important;
}
.search-box button {
background: #3498db !important;
border: none !important;
border-radius: 5px !important;
}
.results-container {
margin-top: 15px;
display: flex;
flex-direction: column; /* Stack answer and sources vertically */
gap: 15px;
}
.answer-box {
/* flex: 2; Removed flex property */
background: #2c2c2c;
padding: 15px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
}
.answer-box .markdown { color: #ecf0f1; line-height: 1.5; }
.sources-list {
/* flex: 1; Removed flex property */
background: #2c2c2c;
padding: 10px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
}
.source-item { margin-bottom: 8px; }
.source-number { color: #3498db; font-weight: bold; margin-right: 5px; }
.source-item a { color: #3498db; text-decoration: none; }
.source-item a:hover { text-decoration: underline; }
.history-box {
margin-top: 15px;
background: #2c2c2c;
padding: 10px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
}
"""
# Gradio app setup with Blocks: header, search box, and a single Chatbot
# that doubles as both the results view and the conversation history.
with gr.Blocks(title="Deep Research Engine - ZeroGPU", css=css) as demo:
    # Header
    with gr.Column(elem_classes="header"):
        gr.Markdown("# Deep Research Engine")
        gr.Markdown("Fast, in-depth answers powered by web insights (ZeroGPU).")
    # Search input and button
    with gr.Row(elem_classes="search-box"):
        search_input = gr.Textbox(label="", placeholder="Ask anything...", lines=2)
        search_btn = gr.Button("Research", variant="primary")
    # Results layout - Now using a single Chatbot component
    history = gr.Chatbot(elem_classes="history-box", label="Research Results & History")
    # Event handling - Simplified
    def handle_search(query, history_data):
        # Only the updated history is shown; the standalone answer and
        # sources strings returned by process_deep_research are discarded
        # because the Chatbot message already embeds both.
        # NOTE(review): the search box is not cleared after a query —
        # confirm whether that is intentional.
        answer, sources, new_history = process_deep_research(query, history_data)
        return new_history
    # Both the button click and pressing Enter in the textbox trigger
    # the same handler and write back into the Chatbot.
    search_btn.click(
        fn=handle_search,
        inputs=[search_input, history],
        outputs=[history]
    )
    search_input.submit(
        fn=handle_search,
        inputs=[search_input, history],
        outputs=[history]
    )
# Launch the app only when run as a script (Spaces executes this module directly).
if __name__ == "__main__":
    demo.launch()