text (string, length 15–129) |
---|
dataset:shareAI/DPO-zh-en-emoji [22] |
dataset:FartLabs/FartDB [32] |
dataset:OEvortex/SentimentSynth [33] |
dataset:MITLL/LADI-v2-dataset [10] |
dataset:liswei/Taiwan-Text-Excellence-2B [33] |
dataset:BUAADreamer/llava-med-zh-instruct-60k [47, 16] |
dataset:AIR-Bench/qa_finance_en [34] |
dataset:ajibawa-2023/Maths-College [33, 40, 22] |
dataset:m-a-p/Matrix [33] |
dataset:spanish-ir/messirve [34] |
dataset:liswei/PromptPair-TW [33, 43, 26] |
dataset:joshuachou/SkinCAP [16] |
dataset:efederici/capybara-claude-15k-ita [22, 33] |
dataset:Zery/BS-Objaverse [13, 47, 22] |
dataset:mxersion/SE-Chatting.en [22, 43] |
dataset:Mitsua/wikidata-parallel-descriptions-en-ja [43] |
dataset:JetBrains/KStack [33] |
dataset:Vi-VLM/Vista [47] |
dataset:HPAI-BSC/medqa-cot [20, 22] |
dataset:tomg-group-umd/cinepile [47, 46] |
dataset:CortexLM/midjourney-v6 [37] |
dataset:atlasia/darija_english [43] |
dataset:HachiML/alpaca_jp_python [33] |
dataset:tdolega/rag-tge_finetuning-dataset_pl [33] |
dataset:aiana94/polynews [8, 33] |
dataset:cenfis/alpaca-turkish-combined [33] |
dataset:llamafactory/alpaca_gpt4_en [33, 22] |
dataset:llamafactory/alpaca_gpt4_zh [33, 22] |
dataset:cajcodes/political-bias [32] |
dataset:TAUR-Lab/MuSR [22] |
dataset:Kukedlc/Big-Spanish-1.2M [33] |
dataset:machinelearnear/multiturn_chat_milei_gpt [22, 40, 33] |
dataset:openbmb/RLAIF-V-Dataset [47] |
dataset:nguyennghia0902/project02_textming_dataset [22] |
dataset:BUAADreamer/llava-en-zh-2k [47] |
dataset:ifmain/text-moderation-410K [32] |
dataset:CarrotAI/ko-instruction-dataset [33] |
dataset:openfoodfacts/spellcheck-dataset [40] |
dataset:Zihao-Li/IEA_Energy_Dataset [33] |
dataset:suyash2739/News_Hinglish_English [43] |
dataset:myrkur/persian-alpaca-deep-clean [33, 26, 42] |
dataset:SenseLLM/ReflectionSeq-GPT [33] |
dataset:SenseLLM/ReflectionSeq-DS [33] |
dataset:kaist-ai/Multifaceted-Collection-DPO [33] |
dataset:kaist-ai/Multifaceted-Collection-ORPO [33] |
dataset:DevQuasar/llm_router_dataset-synth [42] |
dataset:hkust-nlp/dart-math-hard [33] |
dataset:hkust-nlp/dart-math-uniform [33] |
dataset:nyu-visionx/Cambrian-10M [47, 22] |
dataset:efederici/evol-dpo-ita [40] |
dataset:SpursgoZmy/MMTab [27, 33] |
dataset:remyxai/vqasynth_spacellava [47] |
dataset:tasksource/doc-nli [32] |
dataset:damerajee/hindi_VQA [47] |
dataset:NuclearAi/Nuke-X-Glaive-Python-Dataset [22, 33, 40] |
dataset:UCSC-VLAA/Recap-DataComp-1B [49, 34, 16, 37] |
dataset:neoneye/base64-decode-v2 [43] |
dataset:mlabonne/orpo-dpo-mix-40k-flat [33] |
dataset:deepvk/ru-HNP [7] |
dataset:BestWishYsh/ChronoMagic [39] |
dataset:OmniAICreator/Japanese-Roleplay-Dialogues [33] |
dataset:MushanW/GLOBE [36, 4, 3, 1] |
dataset:EvanTHU/MoVid [22] |
dataset:BleachNick/UltraEdit [37] |
dataset:gretelai/synthetic_pii_finance_multilingual [32, 8, 42] |
dataset:Omartificial-Intelligence-Space/Arabic-NLi-Pair-Class [25] |
dataset:FBK-MT/Speech-MASSIVE [1, 32, 49, 4] |
dataset:deepvk/ru-WANLI [7] |
dataset:allenai/tulu-2.5-preference-data [33, 23] |
dataset:Ateeqq/Amazon-Product-Description [40] |
dataset:ReliableAI/Irish-Text-Collection [33] |
dataset:benjleite/FairytaleQA-translated-ptBR [22, 33] |
dataset:benjleite/FairytaleQA-translated-french [22, 33] |
dataset:sorry-bench/sorry-bench-202406 [33, 22, 40] |
dataset:Salesforce/xlam-function-calling-60k [22, 33, 23] |
dataset:AIDC-AI/Ovis-dataset [47] |
dataset:parler-tts/mls-eng-speaker-descriptions [4, 38, 36] |
dataset:parler-tts/libritts-r-filtered-speaker-descriptions [38] |
dataset:parler-tts/libritts_r_filtered [38, 4] |
dataset:tomg-group-umd/pixelprose [16, 37, 47] |
dataset:maxidl/FineNews-unfiltered [33] |
dataset:Msobhi/virgool_62k [8, 33, 32] |
dataset:OEvortex/EmotionalIntelligence-10K [33] |
dataset:alibayram/doktorsitesi [33, 32, 27] |
dataset:cmarkea/doc-vqa [47] |
dataset:dipawidia/ecommerce-product-reviews-sentiment [32] |
dataset:tomaarsen/gooaq-hard-negatives [7, 25] |
dataset:msu-ceco/agxqa_v1 [22] |
dataset:BestWishYsh/ChronoMagic-Pro [39] |
dataset:Hypersniper/unity_api_2022_3 [33] |
dataset:chenghao/sec-material-contracts-qa-splitted [47, 22, 6] |
dataset:yukiarimo/english-vocabulary [32, 42] |
dataset:Panoramax/fr_road_sign_subsign [21] |
dataset:CaptionEmporium/coyo-hd-11m-llavanext [37, 16] |
dataset:sorry-bench/sorry-bench-human-judgment-202406 [32, 33] |
dataset:espnet/mms_ulab_v2 [3, 1] |
dataset:werty1248/Korean-1930-Novel-Scene-Summarize [26] |
dataset:FreedomIntelligence/PubMedVision [22, 33] |
dataset:Magpie-Align/Magpie-Qwen2-Pro-200K-Chinese [22] |
dataset:BestWishYsh/ChronoMagic-Bench [39] |
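
Each row above is a Hugging Face Hub dataset identifier (the part after the `dataset:` prefix) followed by the references that cite it. As a minimal, non-authoritative sketch, an identifier from the first row could be resolved with the `datasets` library roughly as follows; the choice of example ID, its public availability, and its split layout are assumptions, not something this listing confirms.

```python
# Minimal sketch: loading one of the listed Hub identifiers with the
# Hugging Face `datasets` library. The example ID ("shareAI/DPO-zh-en-emoji")
# is taken from the first table row; whether it is publicly downloadable and
# which splits it defines are assumptions to verify on the Hub.
from datasets import load_dataset

ds = load_dataset("shareAI/DPO-zh-en-emoji")
print(ds)                     # shows the available splits and column names

first_split = next(iter(ds.values()))
print(first_split[0])         # inspect one record from the first split
```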