# demo/backend/routes/questions.py
from fastapi import APIRouter, HTTPException
from datasets import load_dataset
import os

router = APIRouter(tags=["benchmark"])


@router.get("/benchmark-questions/{session_id}")
async def get_benchmark_questions(session_id: str):
"""
Get example questions from the generated benchmark
Args:
session_id: Session ID for the benchmark
Returns:
Dictionary with sample questions from the dataset
"""
    try:
        # Dataset path on the Hugging Face Hub
        dataset_repo_id = f"yourbench/yourbench_{session_id}"

        # Initialize the response payload
        response = {
            "success": False,
            "questions": [],
            "dataset_url": f"https://huggingface.co/datasets/{dataset_repo_id}",
        }

        # Try to load the dataset
        questions = []

        try:
            # Try to load single-shot questions directly via the config name
            single_dataset = load_dataset(dataset_repo_id, "single_shot_questions")

            if single_dataset and len(single_dataset["train"]) > 0:
                # Take all questions starting from index 1 (skipping the first question)
                start_idx = 1
                for idx in range(start_idx, len(single_dataset["train"])):
                    row = single_dataset["train"][idx]
                    questions.append({
                        "id": str(idx),
                        "question": row.get("question", ""),
                        "answer": row.get("self_answer", "No answer available"),
                        "type": "single_shot",
                    })
                print(f"Loaded {len(questions)} single-shot questions")
        except Exception as e:
            print(f"Error loading single-shot questions: {str(e)}")
        # If we couldn't load any questions, the dataset might not exist
        if len(questions) == 0:
            # Check for a local directory for this session as a fallback
            session_dir = os.path.join("uploaded_files", session_id)
            if not os.path.exists(session_dir):
                raise HTTPException(status_code=404, detail="Dataset not found")

        # Update the response
        response["success"] = len(questions) > 0
        response["questions"] = questions

        return response
    except HTTPException:
        # Re-raise HTTP exceptions untouched
        raise
    except Exception as e:
        return {
            "success": False,
            "error": f"Error retrieving benchmark questions: {str(e)}",
        }
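

# --- Usage sketch (illustrative only) ---
# A minimal way to mount this router and exercise the endpoint locally,
# assuming FastAPI's TestClient (backed by httpx) is installed.
# "demo-session" is a hypothetical session ID; unless a matching dataset
# exists on the Hub (or a local uploaded_files/<session_id> directory),
# the call is expected to return a 404 or {"success": False, ...}.
if __name__ == "__main__":
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router)

    client = TestClient(app)
    resp = client.get("/benchmark-questions/demo-session")
    print(resp.status_code, resp.json())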