Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
File size: 3,947 Bytes
81e0b0c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 |
import { useState, useRef, useEffect } from "react";
import API_CONFIG from "../../../config/api";
// Define all evaluation steps in sequence
// Ordered evaluation pipeline steps; the array index doubles as the
// progress value exposed by useEvaluation (currentStep).
// Frozen so no consumer can accidentally mutate the shared constant.
const EVALUATION_STEPS = Object.freeze([
  "initializing",
  "finding_available_model_providers",
  "starting_evaluation_process",
  "evaluating_models",
  "storing_evaluation_results",
]);
// Step labels for display
// Human-readable label for each step key in EVALUATION_STEPS.
// Frozen: this is a read-only lookup table shared across renders.
const STEP_LABELS = Object.freeze({
  initializing: "Initializing evaluation environment",
  finding_available_model_providers: "Finding available model providers",
  starting_evaluation_process: "Starting evaluation process",
  evaluating_models: "Evaluating models",
  storing_evaluation_results: "Storing evaluation results",
});
// Error messages that should be treated as errors
// Substrings that, when found (case-insensitively) in server logs, mark
// the evaluation as failed. Frozen read-only constant.
// NOTE: "RATE_LIMIT_EXCEEDED" is technically redundant with "rate limit"
// under the case-insensitive match in checkForErrors, but is kept so the
// list documents the exact server-side token.
const ERROR_MESSAGES = Object.freeze([
  "heavy load",
  "try again later",
  "rate limit",
  "RATE_LIMIT_EXCEEDED",
]);
/**
 * React hook driving a benchmark evaluation for a given session.
 *
 * Kicks off the evaluation via POST /evaluate-benchmark, then polls
 * GET /evaluation-logs/:sessionId every 2s until the server reports
 * completion or an error substring appears in the logs.
 *
 * @param {string} sessionId - Session to evaluate; required before start.
 * @param {Function} [onComplete] - Invoked once when evaluation finishes.
 * @returns {{
 *   error: string|null,
 *   evaluationComplete: boolean,
 *   currentStep: number,
 *   evaluationStarted: boolean,
 *   startEvaluation: Function,
 *   currentStepLabel: string,
 *   totalSteps: number,
 * }}
 */
export const useEvaluation = (sessionId, onComplete) => {
  const [error, setError] = useState(null);
  const [evaluationComplete, setEvaluationComplete] = useState(false);
  const [currentStep, setCurrentStep] = useState(0);
  const [evaluationStarted, setEvaluationStarted] = useState(false);
  const pollingIntervalRef = useRef(null);

  // Clear the active polling interval (if any) and drop the handle so the
  // unmount cleanup and repeated calls are idempotent. The original code
  // left a stale handle in the ref after clearInterval.
  const stopPolling = () => {
    if (pollingIntervalRef.current) {
      clearInterval(pollingIntervalRef.current);
      pollingIntervalRef.current = null;
    }
  };

  // Map a server-reported step key to its position in the pipeline
  // (-1 when the key is unknown).
  const mapStepToIndex = (step) => EVALUATION_STEPS.indexOf(step);

  // Scan server logs for any known error substring (case-insensitive).
  // On a hit: surface a user-facing message, mark the run complete, and
  // stop polling. Returns true when an error was detected.
  const checkForErrors = (logs) => {
    if (!logs) return false;
    const hasError = ERROR_MESSAGES.some((errorMessage) =>
      logs.some((log) => log.toLowerCase().includes(errorMessage.toLowerCase()))
    );
    if (hasError) {
      setError(
        "The demo is currently under heavy load, please try again later."
      );
      setEvaluationComplete(true);
      stopPolling();
      return true;
    }
    return false;
  };

  // POST the evaluation request, then begin polling on success.
  const startEvaluation = async () => {
    if (!sessionId) {
      setError("Missing session ID");
      return;
    }
    setEvaluationStarted(true);
    try {
      const response = await fetch(
        `${API_CONFIG.BASE_URL}/evaluate-benchmark`,
        {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            session_id: sessionId,
          }),
        }
      );
      if (response.ok) {
        setupPolling();
      } else {
        // Parse the error body only when needed; a non-JSON error response
        // must not be misreported as a connection failure (the original
        // awaited response.json() unconditionally, which could throw here).
        const result = await response.json().catch(() => ({}));
        setError(result.error || "Benchmark evaluation failed");
      }
    } catch (err) {
      console.error("Error starting evaluation:", err);
      setError("Error connecting to server");
    }
  };

  // Poll evaluation logs every 2s, advancing the step indicator and
  // resolving to completion or error.
  const setupPolling = () => {
    // Guard against stacking intervals if start is triggered twice.
    stopPolling();
    pollingIntervalRef.current = setInterval(async () => {
      try {
        const logsResponse = await fetch(
          `${API_CONFIG.BASE_URL}/evaluation-logs/${sessionId}`
        );
        if (!logsResponse.ok) return; // transient server hiccup; retry next tick
        const logsResult = await logsResponse.json();
        // Abort further processing if the logs contain a known error.
        if (checkForErrors(logsResult.logs)) {
          return;
        }
        if (logsResult.is_completed) {
          setEvaluationComplete(true);
          stopPolling();
          onComplete?.();
        } else if (logsResult.current_step) {
          const newStepIndex = mapStepToIndex(logsResult.current_step);
          if (newStepIndex !== -1) {
            setCurrentStep(newStepIndex);
          }
        }
      } catch (err) {
        // Polling errors are non-fatal: log and retry on the next tick.
        // (Was console.log; errors belong on console.error.)
        console.error("Error polling logs:", err);
      }
    }, 2000);
  };

  // Stop polling when the consuming component unmounts.
  useEffect(() => {
    return () => {
      stopPolling();
    };
  }, []);

  return {
    error,
    evaluationComplete,
    currentStep,
    evaluationStarted,
    startEvaluation,
    currentStepLabel:
      STEP_LABELS[EVALUATION_STEPS[currentStep]] || "Processing",
    totalSteps: EVALUATION_STEPS.length,
  };
};
|