{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Processing an existing dataset for long-form question answering to filter out overly long answers." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Cut off value for the maximum number of tokens in the answer\n", "MAX_TOKENS_ANSWER = 512" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "DatasetDict({\n", " train: Dataset({\n", " features: ['question', 'answer', 'context'],\n", " num_rows: 226147\n", " })\n", " validation: Dataset({\n", " features: ['question', 'answer', 'context'],\n", " num_rows: 3020\n", " })\n", "})" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import datasets\n", "\n", "# Long-form question answering dataset, nicely preprocessed already.\n", "# Similar to ELI5: https://facebookresearch.github.io/ELI5/index.html (which is unavailable now)\n", "dataset_lfqa = datasets.load_dataset(\"LLukas22/lfqa_preprocessed\")\n", "dataset_lfqa" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", " | question | \n", "answer | \n", "context | \n", "
---|
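{ "cell_type": "markdown", "metadata": {}, "source": [ "The cut-off defined above is applied with a filtering step along the following lines. This is a minimal sketch: the tokenizer choice (`gpt2` via `transformers.AutoTokenizer`) and the name `dataset_lfqa_filtered` are assumptions for illustration, so substitute the tokenizer of whichever model the filtered data is meant for." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer\n", "\n", "# Sketch of the length filter implied by MAX_TOKENS_ANSWER.\n", "# Assumption: token counts come from the GPT-2 tokenizer; swap in the tokenizer\n", "# of the model the filtered dataset is intended for.\n", "tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n", "\n", "def answer_fits(example):\n", "    # Keep only examples whose answer is at most MAX_TOKENS_ANSWER tokens long.\n", "    return len(tokenizer(example[\"answer\"])[\"input_ids\"]) <= MAX_TOKENS_ANSWER\n", "\n", "# DatasetDict.filter applies the predicate to every split (train and validation).\n", "dataset_lfqa_filtered = dataset_lfqa.filter(answer_fits)\n", "dataset_lfqa_filtered" ] },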