Update app.py
app.py CHANGED
@@ -8,56 +8,30 @@ import re
 
 from huggingface_hub import login
 
-
 token = os.environ.get("HG_TOKEN")
 login(token)
 
-print("Loading dataset...")
 try:
     dataset = load_dataset("sudoping01/bambara-speech-recognition-benchmark", name="default")["eval"]
     references = {row["id"]: row["text"] for row in dataset}
-    print(f"Successfully loaded dataset with {len(references)} samples")
 except Exception as e:
-    print(f"Error loading dataset: {str(e)}")
     references = {}
-    print("WARNING: Using empty references dictionary due to dataset loading error")
 
-
+
 leaderboard_file = "leaderboard.csv"
 if not os.path.exists(leaderboard_file):
-    # Create with Model_Name consistently
     pd.DataFrame(columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"]).to_csv(leaderboard_file, index=False)
-    print("Created new leaderboard file")
-
-    # Add example entries for first-time visitors
-    example_data = [
-        ["Example Model 1", 0.35, 0.20, 0.305, "2023-01-01 00:00:00"],
-        ["Example Model 2", 0.40, 0.18, 0.334, "2023-01-02 00:00:00"],
-        ["Example Model 3", 0.32, 0.25, 0.299, "2023-01-03 00:00:00"]
-    ]
-    example_df = pd.DataFrame(
-        example_data,
-        columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"]
-    )
-    example_df.to_csv(leaderboard_file, index=False)
-    print("Added example data to empty leaderboard for demonstration")
 else:
-    # Load existing leaderboard
     leaderboard_df = pd.read_csv(leaderboard_file)
 
-
-    if "submitter" in leaderboard_df.columns and "Model_Name" not in leaderboard_df.columns:
-        leaderboard_df = leaderboard_df.rename(columns={"submitter": "Model_Name"})
-        leaderboard_df.to_csv(leaderboard_file, index=False)
-        print("Renamed 'submitter' column to 'Model_Name' for consistency")
+
+    # if "submitter" in leaderboard_df.columns and "Model_Name" not in leaderboard_df.columns:
+    #     leaderboard_df = leaderboard_df.rename(columns={"submitter": "Model_Name"})
+    #     leaderboard_df.to_csv(leaderboard_file, index=False)
 
-    # Add Combined_Score column if it doesn't exist
     if "Combined_Score" not in leaderboard_df.columns:
-        leaderboard_df["Combined_Score"] = leaderboard_df["WER"] * 0.7 + leaderboard_df["CER"] * 0.3
+        leaderboard_df["Combined_Score"] = leaderboard_df["WER"] * 0.7 + leaderboard_df["CER"] * 0.3
         leaderboard_df.to_csv(leaderboard_file, index=False)
-        print("Added Combined_Score column to existing leaderboard")
-
-    print(f"Loaded existing leaderboard with {len(leaderboard_df)} entries")
 
 def normalize_text(text):
     """
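A note on the token handling this hunk keeps: `huggingface_hub.login()` falls back to an interactive prompt when its token is `None`, so an unset `HG_TOKEN` can hang a headless deployment. A defensive sketch (not part of this commit; the guard and message are illustrative):

```python
import os
from huggingface_hub import login

# Hypothetical guard: only attempt the login when the token is present.
token = os.environ.get("HG_TOKEN")
if token:
    login(token)
else:
    print("HG_TOKEN not set; skipping Hugging Face login")
```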
@@ -89,9 +63,7 @@ def calculate_metrics(predictions_df):
         reference = normalize_text(references[id_val])
         hypothesis = normalize_text(row["text"])
 
-
         if not reference or not hypothesis:
-            print(f"Warning: Empty reference or hypothesis for ID {id_val}")
             continue
 
         reference_words = reference.split()
@@ -99,18 +71,15 @@ def calculate_metrics(predictions_df):
         reference_chars = list(reference)
 
         try:
-
             sample_wer = wer(reference, hypothesis)
             sample_cer = cer(reference, hypothesis)
 
             sample_wer = min(sample_wer, 2.0)
             sample_cer = min(sample_cer, 2.0)
 
-
             total_ref_words += len(reference_words)
             total_ref_chars += len(reference_chars)
 
-
             results.append({
                 "id": id_val,
                 "reference": reference,
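The `wer` and `cer` helpers are presumably jiwer's (their import is outside this diff's context). A minimal sketch of the per-sample computation with the 2.0 cap, using hypothetical strings:

```python
from jiwer import wer, cer  # assumed source of the wer/cer helpers

reference = "an example reference sentence"    # hypothetical sample
hypothesis = "an example reference sentense"   # hypothetical ASR output

# Cap pathological scores at 200%, as the app does, so a single bad
# sample cannot dominate the averages.
sample_wer = min(wer(reference, hypothesis), 2.0)
sample_cer = min(cer(reference, hypothesis), 2.0)
print(sample_wer, sample_cer)
```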
@@ -120,13 +89,13 @@ def calculate_metrics(predictions_df):
                 "wer": sample_wer,
                 "cer": sample_cer
             })
-        except Exception as e:
-            print(f"Error processing sample {id_val}: {str(e)}")
+        except Exception:
+            pass
 
     if not results:
         raise ValueError("No valid samples for WER/CER calculation")
 
-
+
     avg_wer = sum(item["wer"] for item in results) / len(results)
     avg_cer = sum(item["cer"] for item in results) / len(results)
 
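With `except Exception: pass`, samples that fail to score now vanish silently. A middle-ground sketch (hypothetical, not in this commit) that still skips them but keeps a tally:

```python
# Hypothetical variant: skip unscoreable samples but count them, so a
# submission where most rows fail to score can still be flagged.
failures = 0
results = []
for reference, hypothesis in [("a b c", "a b d"), (None, "x")]:
    try:
        if not reference or not hypothesis:
            raise ValueError("empty text")  # stand-in for a wer()/cer() failure
        results.append((reference, hypothesis))
    except Exception:
        failures += 1

print(f"scored={len(results)} skipped={failures}")  # scored=1 skipped=1
```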
@@ -142,7 +111,7 @@ def update_ranking(method):
         current_lb = pd.read_csv(leaderboard_file)
 
         if "Combined_Score" not in current_lb.columns:
-            current_lb["Combined_Score"] = current_lb["WER"] * 0.7 + current_lb["CER"] * 0.3
+            current_lb["Combined_Score"] = current_lb["WER"] * 0.7 + current_lb["CER"] * 0.3
 
         if method == "WER Only":
             return current_lb.sort_values("WER")
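The 70/30 weighting recomputed here matches the example rows removed in the first hunk, which gives a quick sanity check of the formula:

```python
def combined_score(wer: float, cer: float) -> float:
    """Leaderboard ranking score: 70% word errors, 30% character errors."""
    return wer * 0.7 + cer * 0.3

# The removed example rows are consistent with this formula.
assert abs(combined_score(0.35, 0.20) - 0.305) < 1e-9  # Example Model 1
assert abs(combined_score(0.40, 0.18) - 0.334) < 1e-9  # Example Model 2
assert abs(combined_score(0.32, 0.25) - 0.299) < 1e-9  # Example Model 3
```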
@@ -150,15 +119,12 @@ def update_ranking(method):
             return current_lb.sort_values("CER")
         else: # Combined Score
             return current_lb.sort_values("Combined_Score")
-    except Exception as e:
-        print(f"Error updating ranking: {str(e)}")
-        # Return empty dataframe if something goes wrong
+    except Exception:
         return pd.DataFrame(columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"])
 
 def process_submission(model_name, csv_file):
     try:
         df = pd.read_csv(csv_file)
-        print(f"Processing submission from {model_name} with {len(df)} rows")
 
         if len(df) == 0:
             return "Error: Uploaded CSV is empty.", None
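Lower is better for every metric, so plain ascending `sort_values` calls implement all three ranking modes. A toy illustration reusing the example values (model names are hypothetical):

```python
import pandas as pd

lb = pd.DataFrame(
    [["A", 0.32, 0.25, 0.299], ["B", 0.35, 0.20, 0.305]],
    columns=["Model_Name", "WER", "CER", "Combined_Score"],
)
print(lb.sort_values("WER").iloc[0]["Model_Name"])  # A leads on WER
print(lb.sort_values("CER").iloc[0]["Model_Name"])  # B leads on CER
```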
@@ -170,7 +136,7 @@ def process_submission(model_name, csv_file):
             dup_ids = df[df["id"].duplicated()]["id"].unique()
             return f"Error: Duplicate IDs found: {', '.join(map(str, dup_ids[:5]))}", None
 
-
+
         missing_ids = set(references.keys()) - set(df["id"])
         extra_ids = set(df["id"]) - set(references.keys())
 
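The ID validation is two set differences: reference IDs missing from the submission, and submitted IDs with no reference. A self-contained sketch with toy IDs:

```python
references = {"s1": "text 1", "s2": "text 2", "s3": "text 3"}  # toy data
submitted_ids = ["s1", "s2", "s4"]

missing_ids = set(references.keys()) - set(submitted_ids)  # {'s3'}
extra_ids = set(submitted_ids) - set(references.keys())    # {'s4'}
print(missing_ids, extra_ids)
```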
@@ -180,7 +146,7 @@ def process_submission(model_name, csv_file):
         if extra_ids:
             return f"Error: Found {len(extra_ids)} extra IDs not in reference dataset. First few extra: {', '.join(map(str, list(extra_ids)[:5]))}", None
 
-
+
         try:
             avg_wer, avg_cer, weighted_wer, weighted_cer, detailed_results = calculate_metrics(df)
 
@@ -194,7 +160,7 @@ def process_submission(model_name, csv_file):
             leaderboard = pd.read_csv(leaderboard_file)
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-            #
+            # Calculate combined score (70% WER, 30% CER)
             combined_score = avg_wer * 0.7 + avg_cer * 0.3
 
             new_entry = pd.DataFrame(
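The `new_entry = pd.DataFrame(` constructor is truncated by this hunk's context. Based on the column layout used throughout the file, it plausibly looks like the following (a sketch; the metric values are hypothetical):

```python
from datetime import datetime
import pandas as pd

model_name, avg_wer, avg_cer = "my-model", 0.35, 0.20  # hypothetical inputs
combined_score = avg_wer * 0.7 + avg_cer * 0.3
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

new_entry = pd.DataFrame(
    [[model_name, avg_wer, avg_cer, combined_score, timestamp]],
    columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"],
)
```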
@@ -210,23 +176,6 @@ def process_submission(model_name, csv_file):
     except Exception as e:
         return f"Error processing submission: {str(e)}", None
 
-# Make sure we have at least some data for first-time visitors
-if os.path.exists(leaderboard_file):
-    leaderboard_df = pd.read_csv(leaderboard_file)
-    if len(leaderboard_df) == 0:
-        # Add example entries if leaderboard is empty
-        example_data = [
-            ["Example Model 1", 0.35, 0.20, 0.305, "2023-01-01 00:00:00"],
-            ["Example Model 2", 0.40, 0.18, 0.334, "2023-01-02 00:00:00"],
-            ["Example Model 3", 0.32, 0.25, 0.299, "2023-01-03 00:00:00"]
-        ]
-        example_df = pd.DataFrame(
-            example_data,
-            columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"]
-        )
-        example_df.to_csv(leaderboard_file, index=False)
-        print("Added example data to empty leaderboard for demonstration")
-
 with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
     gr.Markdown(
         """
@@ -240,17 +189,13 @@ with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
     with gr.Tabs() as tabs:
         with gr.TabItem("🏅 Current Rankings"):
             try:
-                # Load and make sure we have current leaderboard data
                 current_leaderboard = pd.read_csv(leaderboard_file)
 
                 if "Combined_Score" not in current_leaderboard.columns:
                     current_leaderboard["Combined_Score"] = current_leaderboard["WER"] * 0.7 + current_leaderboard["CER"] * 0.3
 
-                # Sort by combined score
                 current_leaderboard = current_leaderboard.sort_values("Combined_Score")
-            except Exception as e:
-                print(f"Error loading leaderboard: {str(e)}")
-                # Create empty dataframe if we can't load the file
+            except Exception:
                 current_leaderboard = pd.DataFrame(columns=["Model_Name", "WER", "CER", "Combined_Score", "timestamp"])
 
             gr.Markdown("### Current ASR Model Rankings")
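For orientation, the UI around this hunk follows gradio's `Blocks` pattern. A minimal self-contained sketch of the structure being edited (component contents are illustrative):

```python
import gradio as gr
import pandas as pd

leaderboard = pd.DataFrame(columns=["Model_Name", "WER", "CER", "Combined_Score"])

with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
    with gr.Tabs():
        with gr.TabItem("🏅 Current Rankings"):
            gr.Markdown("### Current ASR Model Rankings")
            gr.Dataframe(value=leaderboard)  # table display, as in the app

if __name__ == "__main__":
    demo.launch()
```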
@@ -310,7 +255,5 @@ with gr.Blocks(title="Bambara ASR Leaderboard") as demo:
         outputs=[output_msg, leaderboard_display]
     )
 
-print("Starting Bambara ASR Leaderboard app...")
-
 if __name__ == "__main__":
     demo.launch(share=True)