SondosMB committed
Commit 9f7748a · verified · Parent(s): 104bf5a

Update app.py

Files changed (1):
  1. app.py  +38 -20
app.py CHANGED
@@ -194,20 +194,6 @@ def clean_answer(answer):
         return clean[0].upper()
     return None
 
-def update_leaderboard(results):
-    new_entry = {
-        "Model Name": results['model_name'],
-        "Overall Accuracy": round(results['overall_accuracy'] * 100, 2),
-        "Valid Accuracy": round(results['valid_accuracy'] * 100, 2),
-        "Correct Predictions": results['correct_predictions'],
-        "Total Questions": results['total_questions'],
-        "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-    }
-    leaderboard_df = pd.DataFrame([new_entry])
-    if os.path.exists(LEADERBOARD_FILE):
-        existing_df = pd.read_csv(LEADERBOARD_FILE)
-        leaderboard_df = pd.concat([existing_df, leaderboard_df], ignore_index=True)
-    leaderboard_df.to_csv(LEADERBOARD_FILE, index=False)
 
 def evaluate_predictions(prediction_file):
     ground_truth_file = "ground_truth.csv"
@@ -245,18 +231,50 @@ def evaluate_predictions(prediction_file):
     except Exception as e:
         return f"Error during evaluation: {str(e)}"
 
-def load_leaderboard():
-    if not os.path.exists(LEADERBOARD_FILE):
-        return pd.DataFrame({"Message": ["Leaderboard is empty."]})
-    return pd.read_csv(LEADERBOARD_FILE)
 
 # Build Gradio App
 
+def update_leaderboard(results):
+    """
+    Update the leaderboard file with new results.
+    """
+    new_entry = {
+        "Model Name": results['model_name'],
+        "Overall Accuracy": round(results['overall_accuracy'] * 100, 2),
+        "Valid Accuracy": round(results['valid_accuracy'] * 100, 2),
+        "Correct Predictions": results['correct_predictions'],
+        "Total Questions": results['total_questions'],
+        "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+    }
+
+    # Convert new entry to DataFrame
+    new_entry_df = pd.DataFrame([new_entry])
+
+    # Append to leaderboard file
+    if not os.path.exists(LEADERBOARD_FILE):
+        # If file does not exist, create it with headers
+        new_entry_df.to_csv(LEADERBOARD_FILE, index=False)
+    else:
+        # Append without headers
+        new_entry_df.to_csv(LEADERBOARD_FILE, mode='a', index=False, header=False)
+
+
 def load_leaderboard():
+    """
+    Load the leaderboard from the leaderboard file.
+    """
     if not os.path.exists(LEADERBOARD_FILE):
-        return pd.DataFrame({"Message": ["Leaderboard is empty."]})
-    print("Loading leaderboard data...")
+        return pd.DataFrame({
+            "Model Name": [],
+            "Overall Accuracy": [],
+            "Valid Accuracy": [],
+            "Correct Predictions": [],
+            "Total Questions": [],
+            "Timestamp": [],
+        })
     return pd.read_csv(LEADERBOARD_FILE)
+
+
 def evaluate_predictions_and_update_leaderboard(prediction_file):
     """
     Evaluate predictions and update the leaderboard.
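
The substantive change in this commit is the rewritten update_leaderboard: instead of reading the whole CSV back into memory and rewriting it with pd.concat on every submission, it now appends a single row in mode='a', writing the header only when the file is first created. load_leaderboard likewise returns an empty frame with the real column schema rather than a placeholder "Message" column, presumably so the leaderboard table renders consistent columns before any submissions exist. A minimal, self-contained sketch of that pattern follows; the file name and sample values here are hypothetical stand-ins, not taken from this Space:

    # Sketch of the append-style CSV leaderboard pattern used above.
    # LEADERBOARD_FILE and the demo entry are hypothetical.
    import os
    from datetime import datetime

    import pandas as pd

    LEADERBOARD_FILE = "leaderboard.csv"
    COLUMNS = ["Model Name", "Overall Accuracy", "Valid Accuracy",
               "Correct Predictions", "Total Questions", "Timestamp"]

    def append_entry(entry: dict) -> None:
        row = pd.DataFrame([entry], columns=COLUMNS)
        if not os.path.exists(LEADERBOARD_FILE):
            # First write creates the file with a header row.
            row.to_csv(LEADERBOARD_FILE, index=False)
        else:
            # Later writes append data rows only, so the column
            # order in `entry` must stay fixed across versions.
            row.to_csv(LEADERBOARD_FILE, mode="a", index=False, header=False)

    def load() -> pd.DataFrame:
        if not os.path.exists(LEADERBOARD_FILE):
            # Empty frame with the real schema, so a UI table can
            # still render its columns before any rows exist.
            return pd.DataFrame(columns=COLUMNS)
        return pd.read_csv(LEADERBOARD_FILE)

    if __name__ == "__main__":
        append_entry({
            "Model Name": "demo-model",
            "Overall Accuracy": 81.25,
            "Valid Accuracy": 86.67,
            "Correct Predictions": 26,
            "Total Questions": 32,
            "Timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        })
        print(load())

The trade-off in this design: appending is constant work per submission and shortens the read-modify-write window, but because later rows are written with header=False it silently depends on the column order never changing, whereas the old concat-and-rewrite version re-aligned columns by name on every write.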