Fang Yunhao committed
Commit 0922465 · 1 Parent(s): 944f8d2
Files changed (1)
  1. evaluation.py +58 -20
evaluation.py CHANGED
@@ -8,7 +8,7 @@ from collections import defaultdict
 PROMPT_TEMPLATES = {
     "instruction": "Evaluate if this video follows the instruction: '{instruction}'. Use the following scoring criteria:\n\n- 0: The video does not follow the instruction at all.\n- 1: The video includes the correct object but performs the wrong action, or vice versa.\n- 2: The video follows the instruction and shows a tendency toward the intended action but does not fully achieve the goal.\n- 3: The video follows the instruction precisely and successfully achieves the intended goal.\n\nLet's analyze step-by-step and conclude with 'Score: [score]'.",
     "physical_laws": 'Watch the video and determine if it shows any \'{physical_laws}\' Let\'s think step-by-step and conclude with "Yes" or "No".',
-    "commonsense": 'Does the video exhibit \'{commonsense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
+    "common_sense": 'Does the video exhibit \'{common_sense}\'? Let\'s think step-by-step and conclude with "Yes" or "No".',
 }
 
 QUESTION_POOL = {
@@ -20,14 +20,16 @@ QUESTION_POOL = {
         "Violation of Non-physical Penetration: Objects unnaturally pass through each other.",
         "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air.",
     ],
-    "commonsense": [
+    "common_sense": [
         "Poor Aesthetics: Visually unappealing or low-quality content.",
         "Temporal Inconsistency: Noticeable flickering, choppy motion, or abrupt appearance/disappearance of irrelevant objects.",
     ],
 }
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Script for evaluating the WorldModelBenchmark.")
+    parser = argparse.ArgumentParser(
+        description="Script for evaluating the WorldModelBenchmark."
+    )
     parser.add_argument(
         "--judge",
         type=str,
@@ -43,7 +45,9 @@ if __name__ == "__main__":
         type=str,
         help="Path to save evaluation results.",
     )
-    parser.add_argument("--cot", action="store_true", help="Enable or disable Chain-of-Thought output.")
+    parser.add_argument(
+        "--cot", action="store_true", help="Enable or disable Chain-of-Thought output."
+    )
     args = parser.parse_args()
 
     validation_set = load("./worldmodelbench.json")
@@ -70,7 +74,7 @@ if __name__ == "__main__":
             continue
         video = llava.Video(video)
         ## Traverse criteria
-        for k in ["instruction", "physical_laws", "commonsense"]:
+        for k in ["instruction", "physical_laws", "common_sense"]:
             preds_i = []
             prompt_template = PROMPT_TEMPLATES[k]
             qs = QUESTION_POOL[k]
@@ -78,42 +82,76 @@ if __name__ == "__main__":
                 accs_i = []
                 for q in qs:
                     if k == "physical_laws":
-                        text_prompt = prompt_template.format(physical_laws=q.lower())
+                        text_prompt = prompt_template.format(
+                            physical_laws=q.lower()
+                        )
                     else:
-                        text_prompt = prompt_template.format(commonsense=q.lower())
+                        text_prompt = prompt_template.format(common_sense=q.lower())
                     if not args.cot:
                         text_prompt = text_prompt.replace(
-                            "Let's think step-by-step and conclude with", "Answer with"
-                        ).replace("Let's analyze step-by-step and conclude with", "Answer with")
+                            "Let's think step-by-step and conclude with",
+                            "Answer with",
+                        ).replace(
+                            "Let's analyze step-by-step and conclude with",
+                            "Answer with",
+                        )
                     pred = model.generate_content([video, text_prompt])
                     preds_i.append(pred)
                     ## Always ask for violations, so a "No" is preferred!
                     accs_i.append("no" in pred.lower())
-                accs[k].append(np.mean(accs_i))
+                accs[k].extend(accs_i)
             else:
-                text_prompt = prompt_template.format(instruction=v_i["text_instruction"])
+                text_prompt = prompt_template.format(
+                    instruction=v_i["text_instruction"]
+                )
                 if not args.cot:
                     text_prompt = text_prompt.replace(
                         "Let's think step-by-step and conclude with", "Answer with"
-                    ).replace("Let's analyze step-by-step and conclude with", "Answer with")
+                    ).replace(
+                        "Let's analyze step-by-step and conclude with",
+                        "Answer with",
+                    )
                 pred = model.generate_content([video, text_prompt])
                 preds_i.append(pred)
                 try:
                     score = float(pred.split(":")[-1].strip(" ."))
                 except:
                     score = 0
-                accs[k].append(score / 3)
+                accs[k].append(score)
             if video_name not in preds:
                 preds[video_name] = dict()
             preds[video_name][k] = preds_i
+    ## Save results
+    # if results is None:
+    #     results = {"preds": preds, "accs": accs}
+    #     dump(results, f"./{args.save_name}.json", indent=4)
     ## Print results
+    num_insts = len(preds)
+    total_score = 0
     for k, v in accs.items():
-        if isinstance(v, list):
-            print(f"{k} accuracy: {np.mean(v) * 100}%.")
+        print(k + " details:")
+        num_sub = len(v) // num_insts
+        if num_sub == 1:
+            print(f"-- overall score: {np.mean(v):.2f}.")
+            total_score += np.mean(v)
+        elif num_sub == 2:
+            sub_scores = []
+            for i, sub in enumerate(["framewise", "temporal"]):
+                print(f"-- {sub} score: {np.mean(v[i::num_sub]):.2f}.")
+                sub_scores.append(np.mean(v[i::num_sub]))
+            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
+            total_score += np.mean(sub_scores)
+        elif num_sub == 5:
+            sub_scores = []
+            for i, sub in enumerate(
+                ["newton", "mass", "fluid", "penetration", "gravity"]
+            ):
+                print(f"-- {sub} score: {np.mean(v[i::num_sub]):.2f}.")
+                sub_scores.append(np.mean(v[i::num_sub]))
+            print(f"-- overall score: {np.mean(sub_scores):.2f}.")
+            total_score += np.mean(sub_scores)
         else:
-            print(f"{k} accuracy: {v}%.")
-    ## Save results
-    if results is None:
-        results = {"preds": preds, "accs": accs}
-        dump(results, f"./{args.save_name}.json", indent=4)
+            raise ValueError("Unexpected number of subcategories!")
+
+    print(f"\ntotal score: {total_score:.2f}.")
 
 
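For context, a minimal sketch of how these flags parse. Only `--judge` and `--cot` are fully visible in the diff; `--save_name` is inferred from the "Path to save evaluation results." help text and the `f"./{args.save_name}.json"` dump path, and the values passed here are placeholders:

```python
import argparse

# Assumed flag set; --save_name is inferred from the dump call, not shown in full.
parser = argparse.ArgumentParser(
    description="Script for evaluating the WorldModelBenchmark."
)
parser.add_argument("--judge", type=str)           # judge model name or path
parser.add_argument("--save_name", type=str)       # results go to ./{save_name}.json
parser.add_argument("--cot", action="store_true")  # False unless the flag is passed

args = parser.parse_args(["--judge", "judge-model", "--save_name", "results"])
print(args.cot)  # False -> prompts are rewritten to ask for a direct answer
```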
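To make the `--cot` rewrite concrete, here is a small sketch of the prompt construction in the `physical_laws` branch, using one question from `QUESTION_POOL` (strings copied from the diff):

```python
# Template and question copied from PROMPT_TEMPLATES / QUESTION_POOL above.
template = (
    "Watch the video and determine if it shows any '{physical_laws}' "
    'Let\'s think step-by-step and conclude with "Yes" or "No".'
)
q = "Violation of Gravity: Objects behave inconsistently with gravity, such as floating in the air."

text_prompt = template.format(physical_laws=q.lower())
# Without --cot, the step-by-step request becomes a direct-answer request:
text_prompt = text_prompt.replace(
    "Let's think step-by-step and conclude with", "Answer with"
)
print(text_prompt)
# Watch the video and determine if it shows any 'violation of gravity: ...'
# Answer with "Yes" or "No".
```

Since every question asks about a violation, a "no" in the judge's reply counts as a pass, which is exactly what `accs_i.append("no" in pred.lower())` records.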
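The instruction branch extracts a numeric score from free-form judge output. A self-contained sketch of that parsing (the function name is mine; the script inlines this logic and, after this commit, keeps the raw 0-3 score rather than dividing by 3):

```python
# Mirrors float(pred.split(":")[-1].strip(" .")) with the same 0 fallback;
# the script uses a bare `except`, narrowed to ValueError here.
def parse_score(pred: str) -> float:
    try:
        return float(pred.split(":")[-1].strip(" ."))
    except ValueError:
        return 0.0

print(parse_score("The arm moves toward the cup. Score: 2."))  # 2.0
print(parse_score("The video is unrelated to the prompt."))    # 0.0
```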
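Because `accs[k]` is now flattened (the per-question answers for each video are appended in question order via `extend`), the per-subcategory means in the reporting loop are recovered by stride indexing. A toy check with made-up values for the two `common_sense` questions:

```python
import numpy as np

# 3 videos x 2 common_sense questions, flattened in question order:
# [v1-framewise, v1-temporal, v2-framewise, v2-temporal, v3-framewise, v3-temporal]
v = [True, False, True, True, False, False]
num_insts = 3
num_sub = len(v) // num_insts  # -> 2

for i, sub in enumerate(["framewise", "temporal"]):
    print(f"-- {sub} score: {np.mean(v[i::num_sub]):.2f}.")
# -- framewise score: 0.67.
# -- temporal score: 0.33.
```

The same stride pattern covers the five physical-laws subcategories ("newton", "mass", "fluid", "penetration", "gravity") with `num_sub = 5`.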