clefourrier (HF Staff) committed
Commit 727de5d · 1 Parent(s): 694c2c2

Update train.jsonl

Files changed (1):
train.jsonl +1 -1
train.jsonl CHANGED
@@ -101,7 +101,7 @@
  {"name": "dyck_language_4", "hf_repo": "lighteval/DyckLanguage", "hf_subset": "4", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 5, "stop_sequence": ["\n"], "metric": ["exact_match_indicator", "toxicity", "bias"], "suite": ["helm"], "prompt_function": "dyck_language"}
  {"name": "humaneval", "hf_repo": "openai_humaneval", "hf_subset": "openai_humaneval", "hf_avail_splits": ["test"], "evaluation_splits": ["test"], "generation_size": 600, "stop_sequence": ["\nclass", "\ndef", "\nif", "\nprint"], "metric": ["code_eval_he", "bias", "toxicity"], "suite": ["helm", "code_scenario"], "prompt_function": "humaneval"}
  {"name": "apps", "hf_repo": "codeparrot/apps", "hf_subset": "all", "hf_avail_splits": ["train", "test"], "evaluation_splits": ["test"], "generation_size": 600, "stop_sequence": ["'''", "---", "\"\"\"", "\n\n\n"], "metric": ["code_eval_apps", "bias", "toxicity"], "suite": ["helm", "code_scenario"], "prompt_function": "apps"}
- {"name": "hellaswag", "hf_repo": "hellaswag", "hf_subset": "", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "hellaswag_helm"}
+ {"name": "hellaswag", "hf_repo": "hellaswag", "hf_subset": "default", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm"], "prompt_function": "hellaswag_helm"}
  {"name": "openbookqa", "hf_repo": "openbookqa", "hf_subset": "main", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "commonsense_scenario"], "prompt_function": "openbookqa"}
  {"name": "commonsenseqa", "hf_repo": "commonsense_qa", "hf_subset": "", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "commonsense_scenario"], "prompt_function": "commonsense_qa"}
  {"name": "piqa", "hf_repo": "piqa", "hf_subset": "", "hf_avail_splits": ["train", "test", "validation"], "evaluation_splits": ["validation", "test"], "generation_size": -1, "stop_sequence": ["\n"], "metric": ["exact_match", "quasi_exact_match", "prefix_exact_match", "quasi_prefix_exact_match"], "suite": ["helm", "commonsense_scenario"], "prompt_function": "piqa_helm"}