Quazim0t0 committed on
Commit
86876da
·
verified ·
1 Parent(s): c23f2ac

Update benchmark_selection.py

Browse files
Files changed (1) hide show
  1. benchmark_selection.py +73 -11
benchmark_selection.py CHANGED
@@ -78,13 +78,33 @@ class BenchmarkSelector:
78
  # Format results
79
  results = []
80
  for dataset in datasets:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  results.append({
82
  "id": dataset.id,
83
  "name": dataset.id.split("/")[-1],
84
- "author": dataset.author,
85
- "description": dataset.description[:200] + "..." if dataset.description and len(dataset.description) > 200 else dataset.description,
86
- "tags": dataset.tags,
87
- "downloads": dataset.downloads
88
  })
89
 
90
  return results
@@ -106,18 +126,43 @@ class BenchmarkSelector:
106
  dataset_info = self.hf_api.dataset_info(dataset_id)
107
 
108
  # Get available configurations
109
- configs = get_dataset_config_names(dataset_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
  # Format result
112
  result = {
113
  "id": dataset_info.id,
114
  "name": dataset_info.id.split("/")[-1],
115
- "author": dataset_info.author,
116
- "description": dataset_info.description,
117
- "citation": dataset_info.citation,
118
  "configs": configs,
119
- "tags": dataset_info.tags,
120
- "downloads": dataset_info.downloads
121
  }
122
 
123
  return result
@@ -351,6 +396,9 @@ def create_benchmark_selection_ui(benchmark_selector, auth_manager):
351
 
352
  with gr.Row():
353
  refresh_benchmarks_button = gr.Button("Refresh Benchmarks")
 
 
 
354
 
355
  benchmarks_container = gr.Column()
356
  with benchmarks_container:
@@ -470,6 +518,14 @@ def create_benchmark_selection_ui(benchmark_selector, auth_manager):
470
 
471
  return gr.update(visible=False), gr.update(visible=True), formatted_benchmarks
472
 
 
 
 
 
 
 
 
 
473
  # Connect event handlers
474
  search_button.click(
475
  fn=search_datasets_handler,
@@ -501,6 +557,12 @@ def create_benchmark_selection_ui(benchmark_selector, auth_manager):
501
  outputs=[no_benchmarks_message, my_benchmarks, my_benchmarks]
502
  )
503
 
 
 
 
 
 
 
504
  # Initialize benchmarks on load
505
  benchmark_ui.load(
506
  fn=get_benchmarks_handler,
@@ -508,4 +570,4 @@ def create_benchmark_selection_ui(benchmark_selector, auth_manager):
508
  outputs=[no_benchmarks_message, my_benchmarks, my_benchmarks]
509
  )
510
 
511
- return benchmark_ui
 
78
  # Format results
79
  results = []
80
  for dataset in datasets:
81
+ # Handle cases where description might be missing
82
+ dataset_description = ""
83
+ if hasattr(dataset, 'description') and dataset.description:
84
+ dataset_description = dataset.description[:200] + "..." if len(dataset.description) > 200 else dataset.description
85
+
86
+ # Handle cases where tags might be missing
87
+ dataset_tags = []
88
+ if hasattr(dataset, 'tags'):
89
+ dataset_tags = dataset.tags
90
+
91
+ # Handle cases where downloads might be missing
92
+ dataset_downloads = 0
93
+ if hasattr(dataset, 'downloads'):
94
+ dataset_downloads = dataset.downloads
95
+
96
+ # Handle cases where author might be missing
97
+ dataset_author = ""
98
+ if hasattr(dataset, 'author'):
99
+ dataset_author = dataset.author
100
+
101
  results.append({
102
  "id": dataset.id,
103
  "name": dataset.id.split("/")[-1],
104
+ "author": dataset_author,
105
+ "description": dataset_description,
106
+ "tags": dataset_tags,
107
+ "downloads": dataset_downloads
108
  })
109
 
110
  return results
 
126
  dataset_info = self.hf_api.dataset_info(dataset_id)
127
 
128
  # Get available configurations
129
+ configs = []
130
+ try:
131
+ configs = get_dataset_config_names(dataset_id)
132
+ except Exception as e:
133
+ print(f"Error getting dataset configs: {e}")
134
+
135
+ # Handle missing attributes safely
136
+ dataset_description = ""
137
+ if hasattr(dataset_info, 'description'):
138
+ dataset_description = dataset_info.description
139
+
140
+ dataset_citation = ""
141
+ if hasattr(dataset_info, 'citation'):
142
+ dataset_citation = dataset_info.citation
143
+
144
+ dataset_tags = []
145
+ if hasattr(dataset_info, 'tags'):
146
+ dataset_tags = dataset_info.tags
147
+
148
+ dataset_downloads = 0
149
+ if hasattr(dataset_info, 'downloads'):
150
+ dataset_downloads = dataset_info.downloads
151
+
152
+ dataset_author = ""
153
+ if hasattr(dataset_info, 'author'):
154
+ dataset_author = dataset_info.author
155
 
156
  # Format result
157
  result = {
158
  "id": dataset_info.id,
159
  "name": dataset_info.id.split("/")[-1],
160
+ "author": dataset_author,
161
+ "description": dataset_description,
162
+ "citation": dataset_citation,
163
  "configs": configs,
164
+ "tags": dataset_tags,
165
+ "downloads": dataset_downloads
166
  }
167
 
168
  return result
 
396
 
397
  with gr.Row():
398
  refresh_benchmarks_button = gr.Button("Refresh Benchmarks")
399
+ reload_sample_benchmarks_button = gr.Button("Reload Sample Benchmarks", variant="secondary")
400
+
401
+ reload_status = gr.Markdown("")
402
 
403
  benchmarks_container = gr.Column()
404
  with benchmarks_container:
 
518
 
519
  return gr.update(visible=False), gr.update(visible=True), formatted_benchmarks
520
 
521
+ def reload_sample_benchmarks_handler():
522
+ try:
523
+ from sample_benchmarks import add_sample_benchmarks
524
+ num_added = add_sample_benchmarks()
525
+ return f"✅ Successfully reloaded {num_added} sample benchmarks."
526
+ except Exception as e:
527
+ return f"❌ Error reloading benchmarks: {str(e)}"
528
+
529
  # Connect event handlers
530
  search_button.click(
531
  fn=search_datasets_handler,
 
557
  outputs=[no_benchmarks_message, my_benchmarks, my_benchmarks]
558
  )
559
 
560
+ reload_sample_benchmarks_button.click(
561
+ fn=reload_sample_benchmarks_handler,
562
+ inputs=[],
563
+ outputs=[reload_status]
564
+ )
565
+
566
  # Initialize benchmarks on load
567
  benchmark_ui.load(
568
  fn=get_benchmarks_handler,
 
570
  outputs=[no_benchmarks_message, my_benchmarks, my_benchmarks]
571
  )
572
 
573
+ return benchmark_ui