from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str  # benchmark identifier (e.g., "anli_r1")
    metric: str  # metric to report (e.g., "acc")
    col_name: str  # display name for the leaderboard column


class Tasks(Enum):
    # One member per benchmark shown on the leaderboard:
    # Task(benchmark, metric, col_name).
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")


NUM_FEWSHOT = 0  # number of few-shot examples used during evaluation
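

# A minimal usage sketch, assuming downstream leaderboard code reads the task
# configuration by iterating over the Tasks enum; the demo below is
# illustrative only and runs solely when this module is executed directly.
if __name__ == "__main__":
    for task in Tasks:
        cfg = task.value  # the Task dataclass instance behind each member
        print(f"{cfg.col_name}: benchmark={cfg.benchmark}, metric={cfg.metric}")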


TITLE = """<h1 align="center" id="space-title">Align-Anything</h1>"""


INTRODUCTION_TEXT = """
# Align-Anything

Align-Anything aims to align large models of any modality (any-to-any models), including LLMs, VLMs, and others, with human intentions and values.
More details on the definition and milestones of alignment for large models can be found in AI Alignment.
"""


LLM_BENCHMARKS_TEXT = """
"""


EVALUATION_QUEUE_TEXT = """
"""


CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = """
@article{Align-Anything,
  title={Align Anything: Training All Modality Models to Follow Instructions with Unified Language Feedback},
  author={Xuyao Wang and Jiayi Zhou and Jiaming Ji and Yaodong Yang},
  journal={arXiv preprint arXiv:2411.20343},
  eprint={2411.20343},
  eprinttype={arXiv},
  year={2024}
}
"""


ABOUT_TEXT = """
"""