/**
 *
 * Copyright 2023-2025 InspectorRAGet Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **/

'use client';

import { isEmpty } from 'lodash';
import DOMPurify from 'dompurify';
import parse from 'html-react-parser';
import { useMemo, useState } from 'react';
import {
  Tabs,
  TabList,
  Tab,
  TabPanels,
  TabPanel,
  Button,
  ContainedList,
  ContainedListItem,
} from '@carbon/react';
import { TextHighlight, WarningAlt } from '@carbon/icons-react';

import {
  Model,
  StringMatchObject,
  TaskEvaluation,
  RetrievedDocument,
  Task,
  Metric,
  RetrievedDocumentAnnotation,
} from '@/src/types';
import { useDataStore } from '@/src/store';
import { truncate, overlaps } from '@/src/utilities/strings';
import { mark } from '@/src/utilities/highlighter';
import DocumentPanel from '@/src/views/document/DocumentPanel';
import AnnotationsTable from '@/src/views/annotations-table/AnnotationsTable';
import RAGTaskCopierModal from '@/src/components/task-copier/RAGTaskCopier';

import classes from './RAGTask.module.scss';

// ===================================================================================
// TYPES
// ===================================================================================
/**
 * Props for the RAGTask view.
 *
 * FIX(review): SOURCE had `models: Map;` — `Map` without type arguments does not
 * compile. The component body looks models up via `models.get(evaluation.modelId)`,
 * which grounds `Map<string, Model>`. The two callback props were typed as the
 * unsafe catch-all `Function`; their only call sites are
 * `setTaskCopierModalOpen(false)` and `updateCommentProvenance(<string>)`, so they
 * are typed explicitly here (any function a caller previously passed for these
 * purposes remains assignable).
 */
interface Props {
  // Task whose input, contexts, targets and annotations are rendered.
  task: Task;
  // Model metadata keyed by model ID; used to resolve display names.
  models: Map<string, Model>;
  // Metrics to split into human vs. algorithmic evaluation tables.
  metrics: Metric[];
  // Controlled open-state for the task-copier modal.
  taskCopierModalOpen: boolean;
  // Setter toggling the task-copier modal open-state.
  setTaskCopierModalOpen: (open: boolean) => void;
  // Reports which region of the task the user interacted with (comment provenance).
  updateCommentProvenance: (provenance: string) => void;
}

// ===================================================================================
// MAIN FUNCTION
// NOTE(review): Everything below appears to have been mangled by an HTML-stripping /
// line-collapsing step: JSX opening tags (presumably <RAGTaskCopierModal …>, Carbon
// <Tabs>/<Tab>/<TabPanel>, <DocumentPanel …>, <AnnotationsTable …>, heading elements,
// etc.) are missing because they begin with '<', and multi-line code was folded onto
// single lines so that former line-end "//" comments now comment out the code that
// follows them on the same line. The code is therefore NOT compilable as written —
// recover the true formatting from version control. Comments were added here to
// document the recoverable intent; no code tokens were changed.
//
// Component: renders a single RAG task — its input (question/conversation), the
// retrieved context documents (optionally with response-overlap highlighting), and
// one tab per model evaluation with the model response, targets, and human /
// algorithmic annotation tables.
=================================================================================== export default function RAGTask({ task, models, metrics, taskCopierModalOpen, setTaskCopierModalOpen, updateCommentProvenance, }: Props) { // Step 1: Initialize state and necessary variables const [selectedEvaluationIndex, setSelectedEvaluationIndex] = useState(0); const [showOverlap, setShowOverlap] = useState(false); const [activeDocumentIndex, setActiveDocumentIndex] = useState(0); // Step 2: Run effects // Step 2.a: Fetch data from data store const { item: data } = useDataStore(); // Step 2.b: Fetch documents and evaluations const [documentsPerEvaluation, evaluations] = useMemo(() => { // Step 2.b.i: Initialize necessary variables const contextsPerEvaluation: RetrievedDocument[][] = []; // Step 2.b.i: Fetch evaluations let taskEvaluations: TaskEvaluation[] | undefined = undefined; if (data) { taskEvaluations = data.evaluations.filter( (evaluation) => evaluation.taskId === task.taskId, ); // Step 2.b.i.*: Identify context document for each evaluation and compute context-response overlap and add to evaluation object taskEvaluations.forEach((evaluation) => { const contextDocuments: RetrievedDocument[] = []; const contexts = evaluation.contexts ? evaluation.contexts : task.contexts ?
// NOTE(review): useMemo continued — per-evaluation contexts fall back to
// task.contexts, then []. Each context is resolved against data.documents by
// documentId; a missing document is represented by a 'Missing document text'
// placeholder. If the task carries 'context_relevance' annotations, a green
// 'Relevant' badge listing the annotators is attached to the matching document.
// NOTE(review): mutating referenceDocument.annotations and evaluation.overlaps
// inside useMemo mutates store-owned objects — verify this is intentional.
task.contexts : []; if (!isEmpty(contexts)) { contexts.forEach((context, contextIdx) => { if (data.documents) { const referenceDocument = data.documents.find( (document) => document.documentId === context.documentId, ); if (referenceDocument) { // Step 2.b.i.*: Fetch context relevant annotations, if present if ( task?.annotations && task.annotations.hasOwnProperty('context_relevance') ) { const documentAnnotation: RetrievedDocumentAnnotation = { text: 'Relevant', authors: [], color: 'green', }; for (const [annotator, annotations] of Object.entries( task.annotations.context_relevance, )) { if ( Array.isArray(annotations) && annotations.includes(contextIdx) ) { documentAnnotation.authors.push(annotator); } } if (!isEmpty(documentAnnotation.authors)) { referenceDocument.annotations = [documentAnnotation]; } } contextDocuments.push(referenceDocument); } else { contextDocuments.push({ documentId: context.documentId, text: 'Missing document text', }); } } else { contextDocuments.push({ documentId: context.documentId, text: 'Missing document text', }); } }); } // Compute context-response overlap and add to evaluation object const textOverlaps: StringMatchObject[][] = []; contextDocuments.forEach((contextDocument) => { textOverlaps.push( overlaps(evaluation.modelResponse, contextDocument.text), ); }); evaluation.overlaps = textOverlaps; // Add context documents contextsPerEvaluation.push(contextDocuments); }); } return [contextsPerEvaluation, taskEvaluations]; }, [task.taskId, task.contexts, data]); // Step 2.c: Build human & algorithmic metric maps const [hMetrics, aMetrics] = useMemo(() => { const humanMetrics = new Map( metrics ?.filter((metric) => metric.author === 'human') .map((metric) => [metric.name, metric]), ); const algorithmicMetrics = new Map( metrics ?.filter((metric) => metric.author === 'algorithm') .map((metric) => [metric.name, metric]), ); return [humanMetrics, algorithmicMetrics]; }, [metrics]); // Step 3: Render return ( <> {models && metrics && task
// NOTE(review): task-copier modal — its opening tag (presumably
// <RAGTaskCopierModal … onClose={…}>) was stripped; only the onClose body survives.
&& evaluations && ( { setTaskCopierModalOpen(false); }} > )} {task && models && evaluations && ( <>
// NOTE(review): input section — container class switches between conversation and
// question layouts; the `task.input.length >` prefix of this condition was lost.
1 ? classes.conversationContainer : classes.questionContainer } > {typeof task.input === 'string' ? ( <>
// Plain-string input: rendered under a "Question" heading.

Question

{ updateCommentProvenance('input'); }} onMouseUp={() => updateCommentProvenance('input')} > {task.input}
// Multi-turn input: rendered under a "Conversation" heading, one entry per
// utterance with a capitalized speaker label.
) : Array.isArray(task.input) ? ( <>

Conversation

{ updateCommentProvenance('input'); }} onMouseUp={() => updateCommentProvenance('input')} > {task.input.map((utterance, idx) => ( {utterance.speaker.charAt(0).toUpperCase() + utterance.speaker.slice(1).toLowerCase()} : {utterance.text} ))}
// NOTE(review): this third branch repeats `typeof task.input === 'string'` and is
// unreachable after the first branch — likely leftover/dead code; confirm upstream.
) : typeof task.input === 'string' ? ( <>

Input

{ updateCommentProvenance('input'); }} onMouseUp={() => updateCommentProvenance('input')} > {task.input}
) : null}
// Contexts section: toggleable overlap highlighting, then either an empty notice or
// a DocumentPanel showing the selected evaluation's context documents.
{documentsPerEvaluation && (

Contexts

{showOverlap && (
 marks text assumed to be copied from context into model response
)}
{isEmpty(documentsPerEvaluation[selectedEvaluationIndex]) ? (
No context is available
// NOTE(review): DocumentPanel — documents are re-shaped per render; when overlap
// display is on, document text is run through mark(…, 'target') with the selected
// evaluation's per-document overlaps.
) : ( { return { documentId: document.documentId, text: showOverlap ? mark( document.text, evaluations[selectedEvaluationIndex].overlaps[ documentIdx ], 'target', ) : document.text, ...(document.title && { title: document.title }), ...(document.url && { url: document.url }), ...(document.annotations && { annotations: document.annotations, }), }; })} onMouseDown={(provenance: string) => { updateCommentProvenance(provenance); }} onMouseUp={(provenance: string) => updateCommentProvenance(provenance) } notify={(documentIndex: number) => { setActiveDocumentIndex(documentIndex); }} /> )}
)}
// Per-model tabs (Carbon Tabs — opening tags stripped): selecting a tab switches the
// evaluation and resets the active document index; tab labels are model names
// truncated to 15 characters.
{ setSelectedEvaluationIndex(e.selectedIndex); setActiveDocumentIndex(0); }} > {evaluations.map((evaluation) => ( {truncate( models.get(evaluation.modelId)?.name || evaluation.modelId, 15, )} ))} {evaluations.map((evaluation, evaluationIdx) => (
Model:
{models.get(evaluation.modelId)?.name || evaluation.modelId}
// Model response: sanitized with DOMPurify then parsed to React nodes; overlap
// highlighting (mark(…, 'source')) is applied only on the selected tab.
{ updateCommentProvenance( `${evaluation.modelId}::evaluation::response`, ); }} onMouseUp={() => updateCommentProvenance( `${evaluation.modelId}::evaluation::response`, ) } > {parse( DOMPurify.sanitize( showOverlap && evaluationIdx === selectedEvaluationIndex ? mark( evaluation.modelResponse, evaluation.overlaps[activeDocumentIndex], 'source', ) : evaluation.modelResponse, ), )}
// Targets: numbered when more than one target text exists.
{task.targets && !isEmpty(task.targets) ? ( {task.targets.length > 1 ? ( task.targets.map((target, targetIdx) => target.text ? ( Target {targetIdx + 1}: {target.text} ) : null, ) ) : ( {task.targets[0].text} )} ) : null} {evaluation.annotations && hMetrics.size ? ( <>
// Annotation tables: shown only when the evaluation has annotations AND the
// corresponding metric map (human / algorithmic) is non-empty.
Human Evaluations:
) : null} {evaluation.annotations && aMetrics.size ? ( <>
Algorithmic Evaluations:
) : null}
))}
)} ); }