rohansampath committed on
Commit b5e28ab · verified · 1 Parent(s): 90b1ba7

Update mmlu_pro_eval_adapted.py

Files changed (1):
  1. mmlu_pro_eval_adapted.py +1 -2
mmlu_pro_eval_adapted.py CHANGED
@@ -18,7 +18,6 @@ import pandas as pd
 import numpy as np
 
 logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
 
 # Can be found at https://github.com/TIGER-AI-Lab/MMLU-Pro/blob/main/cot_prompt_lib/initial_prompt.txt
 initial_prompt = "The following are multiple choice questions (with answers) about {$}. Think step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice."
@@ -59,7 +58,7 @@ def load_model(model_name, gpu_utilization=0.8):
                 tensor_parallel_size=torch.cuda.device_count(),
                 max_model_len=max_model_length,
                 trust_remote_code=True)
-    logger.info(f"Torch Device CUDA Count: {torch.cuda.device_count()}")
+    logging.info(f"Torch Device CUDA Count: {torch.cuda.device_count()}")
     sampling_params = SamplingParams(temperature=0, max_tokens=max_new_tokens,
                                      stop=["Question:"])
     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
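
For context, a minimal sketch (not part of the commit; the log message below is illustrative) of why the module-level logger can be dropped: once logging.basicConfig(level=logging.INFO) has run, logging.info(...) is handled by the root logger, and a logger obtained via logging.getLogger(__name__) would propagate to that same root handler anyway.

    import logging

    logging.basicConfig(level=logging.INFO)

    # What the commit switches to: a module-level call handled by the root logger.
    logging.info("CUDA device count check")

    # The pattern the commit removes; its records propagate to the root handler,
    # so it also produces INFO output (tagged with the module name instead of "root").
    logger = logging.getLogger(__name__)
    logger.info("CUDA device count check")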