import gradio as gr
from typing import Optional

from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
from smolagents.agents import ActionStep, MultiStepAgent
from smolagents.memory import MemoryStep


class GradioUI:
    """Simplified Gradio interface for Hugging Face Spaces"""

    def __init__(self, agent: MultiStepAgent):
        self.agent = agent

    def launch(self):
        with gr.Blocks(title="AI Assistant", theme=gr.themes.Soft()) as demo:
            gr.Markdown("""
            # 🤖 AI Assistant

            **Capabilities:**
            - Time zone conversions
            - Weather lookups
            - Unit conversions
            - Web search
            - Image generation
            - Code execution
            """)

            # Conversation display; the second avatar is the agent's icon.
            chatbot = gr.Chatbot(
                height=500,
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
                ),
            )

            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Ask me anything...",
                    container=False,
                    scale=7,
                )
                submit = gr.Button("Send", scale=1)

            def respond(message, chat_history):
                # Append the user's message with an empty assistant reply to fill in.
                chat_history.append((message, ""))
                full_response = ""

                # Stream the agent's run and accumulate model output and tool
                # observations; the chat updates once the run completes.
                for step_log in self.agent.run(message, stream=True):
                    if isinstance(step_log, ActionStep):
                        if hasattr(step_log, "model_output") and step_log.model_output:
                            full_response += step_log.model_output + "\n"
                        if hasattr(step_log, "observations") and step_log.observations:
                            full_response += step_log.observations + "\n"

                chat_history[-1] = (message, full_response)
                # Clear the textbox and return the updated conversation.
                return "", chat_history

            # Pressing Enter and clicking "Send" trigger the same handler.
            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            submit.click(respond, [msg, chatbot], [msg, chatbot])

        demo.launch()
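

# Usage sketch (not part of the class above): one way this UI could be wired to
# an agent when the file is used as a Space's entry point. The tool list, model
# wrapper, and max_steps value below are illustrative assumptions; substitute
# whichever smolagents model class and tools your project actually uses.
if __name__ == "__main__":
    from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],  # assumed tool; add your own tools here
        model=HfApiModel(),              # assumed model wrapper with its default hosted model
        max_steps=6,                     # hypothetical cap on agent steps
    )

    GradioUI(agent).launch()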