Fang Yunhao committed on
Commit bfd1f01 · 1 Parent(s): 73121ca
Files changed (1)
  1. evaluation.py +205 -88
evaluation.py CHANGED
@@ -1,13 +1,17 @@
- from dataclasses import dataclass
  from enum import Enum
  from pathlib import Path
  from typing import Dict, List, Optional, Union
  import logging
  import os
-
  import numpy as np
  from mmengine import load, dump
- from tqdm import tqdm
  from collections import defaultdict

@@ -17,11 +21,9 @@ class EvaluationType(Enum):
      COMMON_SENSE = "common_sense"


- @dataclass
- class EvaluationConfig:
-     """Configuration for evaluation prompts and scoring criteria."""
-
-     PROMPT_TEMPLATES: Dict[str, str] = {
          EvaluationType.INSTRUCTION.value: """
          Evaluate if this video follows the instruction: '{instruction}'.
          Use the following scoring criteria:
@@ -33,17 +35,22 @@ class EvaluationConfig:

          Let's analyze step-by-step and conclude with 'Score: [score]'.
          """.strip(),
          EvaluationType.PHYSICAL_LAWS.value: """
          Watch the video and determine if it shows any '{physical_laws}'
          Let's think step-by-step and conclude with "Yes" or "No".
          """.strip(),
          EvaluationType.COMMON_SENSE.value: """
          Does the video exhibit '{common_sense}'?
          Let's think step-by-step and conclude with "Yes" or "No".
          """.strip(),
      }

-     QUESTION_POOL: Dict[str, Optional[List[str]]] = {
          EvaluationType.INSTRUCTION.value: None,
          EvaluationType.PHYSICAL_LAWS.value: [
              "Violation of Newton's Law: Objects move without any external force.",
@@ -59,156 +66,266 @@ class EvaluationConfig:
      }


- class WorldModelEvaluator:
-     """Evaluates world model benchmark videos using judge model."""

      def __init__(self, judge_path: str, video_dir: str, config: EvaluationConfig):
          self.judge = self._load_judge(judge_path)
          self.video_dir = Path(video_dir)
          self.config = config
          self.logger = logging.getLogger(__name__)

      @staticmethod
      def _load_judge(judge_path: str):
-         """Load the LLaVA judge model."""
          import llava
-
          return llava.load(judge_path)

-     def _load_video(self, video_name: str) -> Optional["llava.Video"]:
          """Load a video file for evaluation."""
          video_path = self.video_dir / f"{video_name}.mp4"
          if not video_path.exists():
              self.logger.warning(f"Video not found: {video_path}")
              return None
          import llava
-
          return llava.Video(str(video_path))

-     def evaluate_video(
-         self, video: "llava.Video", prompt: str, cot: bool = True
-     ) -> str:
          """Generate evaluation content for a video."""
          if not cot:
              prompt = prompt.replace(
                  "Let's think step-by-step and conclude with", "Answer with"
-             ).replace("Let's analyze step-by-step and conclude with", "Answer with")
          return self.judge.generate_content([video, prompt])

      def process_results(self, preds: Dict, accs: defaultdict) -> float:
-         """Process and print evaluation results."""
          num_insts = len(preds)
          total_score = 0
-
          category_mapping = {
              2: [("framewise", "temporal")],
-             5: [("newton", "mass", "fluid", "penetration", "gravity")],
          }

          for category, scores in accs.items():
-             print(f"\n{category} details:")
              num_sub = len(scores) // num_insts
-
              if num_sub == 1:
-                 mean_score = np.mean(scores)
-                 print(f"-- overall score: {mean_score:.2f}")
-                 total_score += mean_score
              elif num_sub in category_mapping:
-                 sub_scores = []
                  for i, sub in enumerate(category_mapping[num_sub][0]):
                      sub_mean = np.mean(scores[i::num_sub])
-                     print(f"-- {sub} score: {sub_mean:.2f}")
-                     sub_scores.append(sub_mean)
-                 overall_mean = np.mean(sub_scores)
-                 print(f"-- overall score: {overall_mean:.2f}")
-                 total_score += overall_mean
              else:
                  raise ValueError(f"Unexpected number of subcategories: {num_sub}")

          return total_score

  def main():
      import argparse
-
      parser = argparse.ArgumentParser(description="Evaluate World Model Benchmark")
-     parser.add_argument(
-         "--judge", type=str, required=True, help="Path to judge model checkpoint"
-     )
-     parser.add_argument(
-         "--video_dir", type=str, required=True, help="Path to generated video directory"
-     )
-     parser.add_argument(
-         "--save_name", type=str, required=True, help="Path to save evaluation results"
-     )
-     parser.add_argument(
-         "--cot", action="store_true", help="Enable Chain-of-Thought output"
-     )
-
      args = parser.parse_args()
-
-     # Setup logging
-     logging.basicConfig(level=logging.INFO)
      logger = logging.getLogger(__name__)

      # Initialize evaluator
      config = EvaluationConfig()
      evaluator = WorldModelEvaluator(args.judge, args.video_dir, config)
-
-     # Load validation set
      validation_set = load("./worldmodelbench.json")
-
      # Check for existing results
-     save_path = f"{args.save_name}_cot" if args.cot else args.save_name
      if os.path.exists(save_path):
          results = load(save_path)
          try:
              preds, accs = results["preds"], results["accs"]
          except KeyError:
              raise KeyError("Expected keys not found in results file")
      else:
          preds = {}
          accs = defaultdict(list)
-
-     for vid, v_i in tqdm(enumerate(validation_set), total=len(validation_set)):
-         video_name = Path(v_i["first_frame"]).stem
-         video = evaluator._load_video(video_name)
-         if not video:
-             continue
-
-         for eval_type in EvaluationType:
-             preds_i = []
-             prompt_template = config.PROMPT_TEMPLATES[eval_type.value]
-             questions = config.QUESTION_POOL[eval_type.value]
-
-             if questions:
-                 accs_i = []
-                 for question in questions:
-                     format_kwargs = {f"{eval_type.value}": question.lower()}
-                     prompt = prompt_template.format(**format_kwargs)
                      pred = evaluator.evaluate_video(video, prompt, args.cot)
                      preds_i.append(pred)
-                     accs_i.append("no" in pred.lower())
-                 accs[eval_type.value].extend(accs_i)
-             else:
-                 prompt = prompt_template.format(instruction=v_i["text_instruction"])
-                 pred = evaluator.evaluate_video(video, prompt, args.cot)
-                 preds_i.append(pred)
-                 try:
-                     score = float(pred.split(":")[-1].strip(" ."))
-                 except ValueError:
-                     logger.warning(f"Could not parse score from prediction: {pred}")
-                     score = 0
-                 accs[eval_type.value].append(score)
-
-             if video_name not in preds:
-                 preds[video_name] = {}
-             preds[video_name][eval_type.value] = preds_i

      # Process and display results
      total_score = evaluator.process_results(preds, accs)
-     print(f"\nTotal score: {total_score:.2f}")


  if __name__ == "__main__":
      main()

+ from dataclasses import dataclass, field
  from enum import Enum
  from pathlib import Path
  from typing import Dict, List, Optional, Union
  import logging
  import os
+ from rich.console import Console
+ from rich.table import Table
+ from rich.panel import Panel
+ # from rich.progress import track
+ # from rich import print as rprint
+ from rich.progress import Progress, BarColumn, TimeRemainingColumn
  import numpy as np
  from mmengine import load, dump
  from collections import defaultdict


      COMMON_SENSE = "common_sense"


+ def get_default_prompt_templates() -> Dict[str, str]:
+     """Factory function for default prompt templates."""
+     return {
          EvaluationType.INSTRUCTION.value: """
          Evaluate if this video follows the instruction: '{instruction}'.
          Use the following scoring criteria:

          Let's analyze step-by-step and conclude with 'Score: [score]'.
          """.strip(),
+
          EvaluationType.PHYSICAL_LAWS.value: """
          Watch the video and determine if it shows any '{physical_laws}'
          Let's think step-by-step and conclude with "Yes" or "No".
          """.strip(),
+
          EvaluationType.COMMON_SENSE.value: """
          Does the video exhibit '{common_sense}'?
          Let's think step-by-step and conclude with "Yes" or "No".
          """.strip(),
      }

+
+ def get_default_question_pool() -> Dict[str, Optional[List[str]]]:
+     """Factory function for default question pool."""
+     return {
          EvaluationType.INSTRUCTION.value: None,
          EvaluationType.PHYSICAL_LAWS.value: [
              "Violation of Newton's Law: Objects move without any external force.",

      }


+ @dataclass
+ class EvaluationConfig:
+     """Configuration for evaluation prompts and scoring criteria."""
+     PROMPT_TEMPLATES: Dict[str, str] = field(default_factory=get_default_prompt_templates)
+     QUESTION_POOL: Dict[str, Optional[List[str]]] = field(default_factory=get_default_question_pool)
+
+
+ class ResultsPrinter:
+     """Handles formatted output of evaluation results."""
+
+     def __init__(self):
+         self.console = Console()
+
+     def print_header(self, text: str):
+         """Print a styled header."""
+         self.console.print(f"\n[bold blue]{text}[/bold blue]")
+
+     def print_score(self, category: str, score: float, indent: int = 0):
+         """Print a score with proper formatting."""
+         indent_str = " " * indent
+         self.console.print(f"{indent_str}[cyan]{category}:[/cyan] [yellow]{score:.2f}[/yellow]")
+
+     def create_results_table(self, category: str, scores: Dict[str, float]) -> Table:
+         """Create a rich table for displaying results."""
+         table = Table(title=f"{category} Results", show_header=True, header_style="bold magenta")
+         table.add_column("Metric", style="cyan")
+         table.add_column("Score", justify="right", style="yellow")
+
+         for metric, score in scores.items():
+             table.add_row(metric, f"{score:.2f}")
+
+         return table
+
+     def print_summary_panel(self, total_score: float, num_categories: int):
+         """Print a panel with summary information."""
+         panel = Panel(
+             f"[bold green]Total Score: {total_score:.2f}[/bold green]\n",
+             # f"[blue]Average per category: {total_score/num_categories:.2f}[/blue]",
+             title="Evaluation Summary",
+             border_style="green"
+         )
+         self.console.print(panel)
+

+ class WorldModelEvaluator:
+     """Evaluates world model benchmark videos using VILA model."""
+
      def __init__(self, judge_path: str, video_dir: str, config: EvaluationConfig):
          self.judge = self._load_judge(judge_path)
          self.video_dir = Path(video_dir)
          self.config = config
          self.logger = logging.getLogger(__name__)
+         self.printer = ResultsPrinter()

      @staticmethod
      def _load_judge(judge_path: str):
+         """Load the VILA judge model."""
          import llava
          return llava.load(judge_path)

+     def _load_video(self, video_name: str) -> Optional['llava.Video']:
          """Load a video file for evaluation."""
          video_path = self.video_dir / f"{video_name}.mp4"
          if not video_path.exists():
              self.logger.warning(f"Video not found: {video_path}")
              return None
          import llava
          return llava.Video(str(video_path))

+     def evaluate_video(self, video: 'llava.Video', prompt: str, cot: bool = True) -> str:
          """Generate evaluation content for a video."""
          if not cot:
              prompt = prompt.replace(
                  "Let's think step-by-step and conclude with", "Answer with"
+             ).replace(
+                 "Let's analyze step-by-step and conclude with", "Answer with"
+             )
          return self.judge.generate_content([video, prompt])

      def process_results(self, preds: Dict, accs: defaultdict) -> float:
+         """Process and print evaluation results with rich formatting."""
          num_insts = len(preds)
          total_score = 0
+
          category_mapping = {
              2: [("framewise", "temporal")],
+             5: [("newton", "mass", "fluid", "penetration", "gravity")]
          }

          for category, scores in accs.items():
+             self.printer.print_header(f"{category.replace('_', ' ').title()} Details")
              num_sub = len(scores) // num_insts
+
              if num_sub == 1:
+                 overall = np.mean(scores)
+                 self.printer.print_score("Overall", overall)
+                 total_score += overall
              elif num_sub in category_mapping:
+                 sub_scores = {}
                  for i, sub in enumerate(category_mapping[num_sub][0]):
                      sub_mean = np.mean(scores[i::num_sub])
+                     sub_scores[sub.title()] = sub_mean
+
+                 # Create and display results table
+                 table = self.printer.create_results_table(
+                     category.replace('_', ' ').title(),
+                     sub_scores
+                 )
+                 self.printer.console.print(table)
+
+                 overall = np.sum(list(sub_scores.values()))
+                 self.printer.print_score("Overall", overall, indent=2)
+                 total_score += overall
              else:
                  raise ValueError(f"Unexpected number of subcategories: {num_sub}")

+         self.printer.print_summary_panel(total_score, len(accs))
          return total_score

+ def save_results(results: Dict, save_path: str):
+     """Save evaluation results to a file."""
+     dump(results, save_path, indent=4)
+     Console().print(f"[green]Results saved to: {save_path}[/green]")
+
+ class RichLogHandler(logging.Handler):
+     """Custom logging handler that uses Rich for formatting."""
+     def __init__(self):
+         super().__init__()
+         self.console = Console()
+
+     def emit(self, record):
+         try:
+             msg = self.format(record)
+             style = "bold red" if record.levelno >= logging.WARNING else "blue"
+             self.console.print(f"[{style}]{msg}[/{style}]")
+         except Exception:
+             self.handleError(record)
+
  def main():
      import argparse
+
      parser = argparse.ArgumentParser(description="Evaluate World Model Benchmark")
+     parser.add_argument("--judge", type=str, required=True, help="Path to judge model checkpoint")
+     parser.add_argument("--video_dir", type=str, required=True, help="Path to generated video directory")
+     parser.add_argument("--save_name", type=str, required=True, help="Path to save evaluation results")
+     parser.add_argument("--cot", action="store_true", help="Enable Chain-of-Thought output")
+     parser.add_argument("--no-save", action="store_true", help="Disable saving results")
+
      args = parser.parse_args()
+
+     # Setup logging with custom Rich handler
+     logging.basicConfig(
+         level=logging.INFO,
+         format="%(message)s",
+         handlers=[RichLogHandler()]
+     )
      logger = logging.getLogger(__name__)

      # Initialize evaluator
      config = EvaluationConfig()
      evaluator = WorldModelEvaluator(args.judge, args.video_dir, config)
+     printer = ResultsPrinter()
+
+     # Load validation set with status message
+     printer.console.print("[bold]Loading validation set...[/bold]")
      validation_set = load("./worldmodelbench.json")
+
      # Check for existing results
+     save_path = f"{args.save_name}_cot.json" if args.cot else f"{args.save_name}.json"
      if os.path.exists(save_path):
+         printer.console.print("[bold yellow]Loading existing results...[/bold yellow]")
          results = load(save_path)
          try:
              preds, accs = results["preds"], results["accs"]
          except KeyError:
              raise KeyError("Expected keys not found in results file")
      else:
+         printer.console.print("[bold green]Starting new evaluation...[/bold green]")
          preds = {}
          accs = defaultdict(list)
+
+     # Create a single progress instance for all operations
+     with Progress(
+         "[progress.description]{task.description}",
+         BarColumn(),
+         "[progress.percentage]{task.percentage:>3.0f}%",
+         TimeRemainingColumn(),
+         console=printer.console
+     ) as progress:
+         # Main task for video processing
+         video_task = progress.add_task("Processing videos", total=len(validation_set))
+
+         for vid, v_i in enumerate(validation_set):
+             video_name = Path(v_i["first_frame"]).stem
+             video = evaluator._load_video(video_name)
+             if not video:
+                 progress.advance(video_task)
+                 continue
+
+             # Evaluation task
+             eval_task = progress.add_task(
+                 f"Evaluating {video_name}",
+                 total=len(EvaluationType)
+             )
+
+             for eval_type in EvaluationType:
+                 preds_i = []
+                 prompt_template = config.PROMPT_TEMPLATES[eval_type.value]
+                 questions = config.QUESTION_POOL[eval_type.value]
+
+                 if questions:
+                     accs_i = []
+                     # Questions task
+                     question_task = progress.add_task(
+                         f"Processing {eval_type.value} questions",
+                         total=len(questions)
+                     )
+
+                     for question in questions:
+                         format_kwargs = {
+                             f"{eval_type.value}": question.lower()
+                         }
+                         prompt = prompt_template.format(**format_kwargs)
+                         pred = evaluator.evaluate_video(video, prompt, args.cot)
+                         preds_i.append(pred)
+                         accs_i.append("no" in pred.lower())
+                         progress.advance(question_task)
+
+                     progress.remove_task(question_task)
+                     accs[eval_type.value].extend(accs_i)
+                 else:
+                     prompt = prompt_template.format(instruction=v_i["text_instruction"])
                      pred = evaluator.evaluate_video(video, prompt, args.cot)
                      preds_i.append(pred)
+                     try:
+                         score = float(pred.split(":")[-1].strip(" ."))
+                     except ValueError:
+                         logger.warning(f"Could not parse score from prediction: {pred}")
+                         score = 0
+                     accs[eval_type.value].append(score)
+
+                 if video_name not in preds:
+                     preds[video_name] = {}
+                 preds[video_name][eval_type.value] = preds_i
+                 progress.advance(eval_task)
+
+             progress.remove_task(eval_task)
+             progress.advance(video_task)
+
+     # Save results if requested
+     if not args.no_save:
+         results = {"preds": preds, "accs": accs}
+         save_results(results, save_path)

      # Process and display results
+     printer.console.print("\n[bold]Final Evaluation Results[/bold]")
      total_score = evaluator.process_results(preds, accs)


  if __name__ == "__main__":
      main()
+
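Note on the config change above: a dataclass rejects a bare mutable default such as a dict, which is why this commit moves PROMPT_TEMPLATES and QUESTION_POOL behind field(default_factory=...) backed by module-level factory functions. A minimal, self-contained sketch of the pattern (illustrative only, not code from this repository):

    from dataclasses import dataclass, field
    from typing import Dict

    # A bare dict default would fail at class-definition time with roughly
    # "ValueError: mutable default <class 'dict'> for field TEMPLATES is not allowed".
    # @dataclass
    # class Broken:
    #     TEMPLATES: Dict[str, str] = {"greeting": "hello"}

    @dataclass
    class Fixed:
        # default_factory builds a fresh dict per instance, so instances never
        # share or accidentally mutate a common default mapping.
        TEMPLATES: Dict[str, str] = field(default_factory=lambda: {"greeting": "hello"})

    a, b = Fixed(), Fixed()
    a.TEMPLATES["greeting"] = "changed"
    assert b.TEMPLATES["greeting"] == "hello"  # b keeps its own copy

Judging from the argparse flags added in main(), the script would be run along the lines of python evaluation.py --judge <judge_checkpoint> --video_dir <generated_videos> --save_name results --cot, with worldmodelbench.json expected in the working directory and results written to results_cot.json (or results.json without --cot); the paths shown here are placeholders.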