import gradio as gr
import spaces  # Required for ZeroGPU
from transformers import pipeline
from duckduckgo_search import DDGS
from datetime import datetime

# Initialize a lightweight text-generation model on CPU
generator = pipeline("text-generation", model="distilgpt2", device=-1)  # device=-1 ensures CPU by default

# Web search function (CPU-based)
def get_web_results(query: str, max_results: int = 3) -> list:
    """Fetch web results synchronously for ZeroGPU compatibility."""
    try:
        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=max_results))
            return [
                {"title": r.get("title", "No Title"), "snippet": r["body"], "url": r["href"]}
                for r in results
            ]
    except Exception as e:
        return [{"title": "Error", "snippet": f"Failed to fetch results: {str(e)}", "url": "#"}]

# Format prompt for the AI model (CPU-based)
def format_prompt(query: str, web_results: list) -> str:
    """Create a concise prompt with web context."""
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    context = "\n".join([f"- {r['title']}: {r['snippet']}" for r in web_results])
    return f"""Time: {current_time}
Query: {query}
Web Context:
{context}
Provide a concise answer in markdown format with citations [1], [2], etc."""

# GPU-decorated answer generation
@spaces.GPU(duration=120)  # Allow up to 120 seconds of GPU time
def generate_answer(prompt: str) -> str:
    """Generate a concise research answer using GPU."""
    # Use max_new_tokens instead of max_length so tokens are generated beyond the prompt
    response = generator(prompt, max_new_tokens=150, num_return_sequences=1, truncation=True)[0]["generated_text"]
    # Return only the text generated after the final instruction line of the prompt.
    # find() returns -1 when the marker is absent, so check that BEFORE adding
    # len(marker); adding first would make the guard always pass.
    marker = "Provide a concise answer in markdown format with citations [1], [2], etc."
    marker_pos = response.find(marker)
    if marker_pos == -1:
        return "No detailed answer generated."
    return response[marker_pos + len(marker):].strip()
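# NOTE: `generator` above is pinned to CPU (device=-1), so the ZeroGPU slot that
# @spaces.GPU allocates goes unused as written. A minimal sketch of a variant that
# actually runs on the allocated GPU (an assumption, not part of the original app;
# `generate_answer_gpu` is a hypothetical name and requires a CUDA-enabled torch):
#
#     @spaces.GPU(duration=120)
#     def generate_answer_gpu(prompt: str) -> str:
#         gpu_generator = pipeline("text-generation", model="distilgpt2", device=0)
#         return gpu_generator(prompt, max_new_tokens=150, truncation=True)[0]["generated_text"]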
# Format sources for display (CPU-based)
def format_sources(web_results: list) -> str:
    """Create a simple HTML list of sources."""
    if not web_results:
        return "<div>No sources available</div>"
    sources_html = "<div>"
    for i, res in enumerate(web_results, 1):
        # Classes match the .source-item / .source-number rules in the CSS below
        sources_html += (
            f"<div class='source-item'>"
            f"<span class='source-number'>[{i}]</span> "
            f"<a href='{res['url']}' target='_blank'>{res['title']}</a>: "
            f"{res['snippet'][:100]}..."
            f"</div>"
        )
    sources_html += "</div>"
    return sources_html
" return sources_html # Main processing function def process_deep_research(query: str, history: list): """Handle the deep research process.""" if not history: history = [] # Fetch web results (CPU) web_results = get_web_results(query) sources_html = format_sources(web_results) # Generate answer (GPU via @spaces.GPU) prompt = format_prompt(query, web_results) answer = generate_answer(prompt) # Convert history to messages format (role/content) new_history = history + [{"role": "user", "content": query}, {"role": "assistant", "content": answer}] return answer, sources_html, new_history # Custom CSS for a cool, lightweight UI css = """ body { font-family: 'Arial', sans-serif; background: #1a1a1a; color: #ffffff; } .gradio-container { max-width: 900px; margin: 0 auto; padding: 15px; } .header { text-align: center; padding: 15px; background: linear-gradient(135deg, #2c3e50, #3498db); border-radius: 8px; margin-bottom: 15px; } .header h1 { font-size: 2em; margin: 0; color: #ffffff; } .header p { color: #bdc3c7; font-size: 1em; } .search-box { background: #2c2c2c; padding: 10px; border-radius: 8px; box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); } .search-box input { background: #3a3a3a !important; color: #ffffff !important; border: none !important; border-radius: 5px !important; } .search-box button { background: #3498db !important; border: none !important; border-radius: 5px !important; } .results-container { margin-top: 15px; display: flex; gap: 15px; } .answer-box { flex: 2; background: #2c2c2c; padding: 15px; border-radius: 8px; box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); } .answer-box .markdown { color: #ecf0f1; line-height: 1.5; } .sources-list { flex: 1; background: #2c2c2c; padding: 10px; border-radius: 8px; box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); } .source-item { margin-bottom: 8px; } .source-number { color: #3498db; font-weight: bold; margin-right: 5px; } .source-item a { color: #3498db; text-decoration: none; } .source-item a:hover { text-decoration: underline; } .history-box { margin-top: 15px; background: #2c2c2c; padding: 10px; border-radius: 8px; max-height: 250px; overflow-y: auto; box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); } """ # Gradio app setup with Blocks with gr.Blocks(title="Deep Research Engine - ZeroGPU", css=css) as demo: history_state = gr.State([]) # Header with gr.Column(elem_classes="header"): gr.Markdown("# Deep Research Engine") gr.Markdown("Fast, in-depth answers powered by web insights (ZeroGPU).") # Search input and button with gr.Row(elem_classes="search-box"): search_input = gr.Textbox(label="", placeholder="Ask anything...", lines=2) search_btn = gr.Button("Research", variant="primary") # Results layout with gr.Row(elem_classes="results-container"): with gr.Column(): answer_output = gr.Markdown(label="Research Findings", elem_classes="answer-box") with gr.Column(): sources_output = gr.HTML(label="Sources", elem_classes="sources-list") # Chat history (using messages format) with gr.Row(): history_display = gr.Chatbot(label="History", elem_classes="history-box", type="messages") # Event handling def handle_search(query, history): answer, sources, new_history = process_deep_research(query, history) return answer, sources, new_history search_btn.click( fn=handle_search, inputs=[search_input, history_state], outputs=[answer_output, sources_output, history_display] ).then( fn=lambda x: x, inputs=[history_display], outputs=[history_state] ) search_input.submit( fn=handle_search, inputs=[search_input, history_state], outputs=[answer_output, sources_output, history_display] 
    ).then(
        fn=lambda x: x,
        inputs=[history_display],
        outputs=[history_state],
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
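# Local-run note (an assumption about the environment, not part of the original app):
# outside Hugging Face Spaces the @spaces.GPU decorator is designed to have no effect,
# so this script should also run on a plain CPU machine. On Spaces, the accompanying
# requirements.txt would need at least:
#   gradio, spaces, transformers, torch, duckduckgo_search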