# NOTE: The following lines are Hugging Face file-viewer chrome that was
# accidentally captured with the file; commented out so the module parses.
# leofltt's picture
# minimal version
# 364f05f
# raw / history blame / 6.68 kB
# app.py (Minimal "Dummy" Version for Step 1)
import os
import gradio as gr
import requests
import pandas as pd
import logging
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# -----------------------------------------------------------------
# --- AGENT LOGIC IS COMPLETELY REMOVED OR COMMENTED OUT ---
# -----------------------------------------------------------------
# We are replacing the entire GaiaAgent with this simple class.
# It has no external dependencies.
class DummyAgent:
    """Minimal placeholder agent that always answers with a fixed string.

    Exists purely to exercise the surrounding app and submission plumbing
    without loading any models or tools.
    """

    def __init__(self) -> None:
        # Nothing to set up -- zero external dependencies by design.
        logging.info("DummyAgent initialized. No models or tools loaded.")

    def __call__(self, question: str) -> str:
        """Return the constant answer, logging a short question preview."""
        preview = question[:50]
        logging.info(f"DummyAgent received question: {preview}...")
        answer = "dummy_answer"
        logging.info(f"DummyAgent returning fixed answer: {answer}")
        return answer
# All other imports and tool definitions are removed for this test.
# from langchain_community.llms import HuggingFaceHub
# from langchain_community.tools import DuckDuckGoSearchRun
# ... and so on ...
# All tool definitions are removed.
# @tool
# def web_search...
# The AgentState class is removed.
# class AgentState...
# The GaiaAgent class is replaced by DummyAgent above.
# -----------------------------------------------------------------
# --- GRADIO APP AND SUBMISSION LOGIC (largely unchanged) ---
# -----------------------------------------------------------------
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
Fetches all questions, runs the DUMMY agent, and submits the answers.
"""
if not profile:
logging.warning("User not logged in.")
return "Please Login to Hugging Face with the button.", None
username = profile.username
logging.info(f"User logged in: {username}")
space_id = os.getenv("SPACE_ID")
if not space_id:
logging.error("SPACE_ID environment variable is not set.")
return "CRITICAL ERROR: SPACE_ID environment variable is not set.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
# 1. Instantiate Agent (Using the DummyAgent for this test)
try:
# We instantiate our simple, harmless agent.
agent = DummyAgent()
except Exception as e:
logging.critical(
f"Fatal error instantiating even the DummyAgent: {e}", exc_info=True
)
return f"Fatal error initializing agent: {e}", None
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
logging.info(f"Agent code URL: {agent_code}")
# 2. Fetch Questions
logging.info(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=20)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
logging.warning("Fetched questions list is empty.")
return "Fetched questions list is empty.", None
logging.info(f"Fetched {len(questions_data)} questions.")
except Exception as e:
logging.error(f"Error fetching questions: {e}")
return f"Error fetching questions: {e}", None
# 3. Run your Agent
results_log = []
answers_payload = []
logging.info(f"Running dummy agent on {len(questions_data)} questions...")
for i, item in enumerate(questions_data):
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
continue
try:
# The agent call is now super fast and simple.
submitted_answer = agent(question_text)
answers_payload.append(
{"task_id": task_id, "submitted_answer": submitted_answer}
)
results_log.append(
{
"Task ID": task_id,
"Question": question_text,
"Submitted Answer": submitted_answer,
}
)
except Exception as e:
logging.error(
f"Error running dummy agent on task {task_id}: {e}", exc_info=True
)
results_log.append(
{
"Task ID": task_id,
"Question": question_text,
"Submitted Answer": f"DUMMY AGENT ERROR: {e}",
}
)
if not answers_payload:
logging.warning("Dummy agent did not produce any answers.")
return "Dummy agent did not produce any answers.", pd.DataFrame(results_log)
# 4. Prepare and Submit
submission_data = {
"username": username.strip(),
"agent_code": agent_code,
"answers": answers_payload,
}
logging.info(f"Submitting {len(answers_payload)} answers for user '{username}'...")
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'This was a test with a dummy agent.')}"
)
logging.info("Submission successful.")
return final_status, pd.DataFrame(results_log)
except Exception as e:
logging.critical(
f"An unexpected error occurred during submission: {e}", exc_info=True
)
return f"An unexpected error occurred during submission: {e}", pd.DataFrame(
results_log
)
# --- Build Gradio Interface (Unchanged) ---
# Build the Gradio interface. Component creation order defines the layout.
with gr.Blocks() as demo:
    gr.Markdown("# GAIA Agent Evaluation Runner (Minimal Test)")
    gr.Markdown("This is a minimal version to test the basic app stability.")
    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False
    )
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No explicit `inputs`: Gradio auto-injects the OAuth profile based on
    # the gr.OAuthProfile type hint on run_and_submit_all.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table],
        api_name="run_evaluation",
    )
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logging.info("App Starting (Minimal Version)...")
demo.launch(debug=True, share=False)