kpfadnis committed · Commit 8bab137 · 1 Parent(s): 0a1d0ea

feat (model comparator): Enhanced control and added interactivity to single metric plot (#11)

* feat (model comparator): Allow user to click on a scatter plot point to load the corresponding task.
* feat (model comparator): Allow user to restrict data points to a selected value range for numerical metrics.
---------

Signed-off-by: Kshitij Fadnis <[email protected]>
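
Note on the first bullet: the click-to-load-task behavior is wired through Carbon Charts' event service, reached via a React ref on the ScatterChart (see the "Step 2.i: Add chart event" effect in ModelComparator.tsx below). A minimal self-contained sketch of the same pattern, with hypothetical data, options, and onTaskSelection props standing in for the real ones:

import { useEffect, useRef } from 'react';
import { ScatterChart } from '@carbon/charts-react';

// Sketch only: every datum carries the taskId that prepareScatterPlotData
// attaches in the diff below; clicking a point reports that task.
function ClickableScatter({
  data,
  options,
  onTaskSelection,
}: {
  data: { group: string; key: number; value: number; taskId?: string }[];
  options: any;
  onTaskSelection: (taskId: string) => void;
}) {
  const chartRef = useRef<any>(null);

  useEffect(() => {
    // Hold one stable handler reference so removeEventListener detaches
    // the exact listener that addEventListener registered.
    const handler = ({ detail }: any) => onTaskSelection(detail.datum.taskId);
    const chart = chartRef.current;
    chart?.chart.services.events.addEventListener('scatter-click', handler);
    return () => {
      chart?.chart.services.events.removeEventListener('scatter-click', handler);
    };
  }, [onTaskSelection]);

  return <ScatterChart ref={chartRef} data={data} options={options} />;
}

One caveat: the effect in the diff passes a freshly created arrow function to removeEventListener on cleanup, which cannot match the originally registered listener; holding a single reference, as in the sketch, is the safer variant of the same idea.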

src/views/model-comparator/ModelComparator.module.scss CHANGED
@@ -30,12 +30,8 @@
   column-gap: $spacing-09;
 }
 
-.agreementSelector {
-  width: 15%;
-}
-
 .modelSelector {
-  width: 20%;
+  width: 15%;
 }
 
 .metricSelector {
src/views/model-comparator/ModelComparator.tsx CHANGED
@@ -20,8 +20,8 @@
 
 import { countBy, isEmpty } from 'lodash';
 import cx from 'classnames';
-import { useState, useMemo, useEffect } from 'react';
-import { Tile, Button } from '@carbon/react';
+import { useState, useMemo, useEffect, useRef } from 'react';
+import { Tile, Button, Slider } from '@carbon/react';
 import { WarningAlt } from '@carbon/icons-react';
 import { ScatterChart } from '@carbon/charts-react';
 
@@ -34,6 +34,7 @@ import {
 } from '@/src/utilities/metrics';
 import { calculateFisherRandomization } from '@/src/utilities/significance';
 import { areObjectsIntersecting } from '@/src/utilities/objects';
+import { hash } from '@/src/utilities/strings';
 
 import Filters from '@/src/components/filters/Filters';
 import TasksTable from '@/src/views/tasks-table/TasksTable';
@@ -54,6 +55,7 @@ type StatisticalInformation = {
   meanA: number;
   distributionB: number[];
   meanB: number;
+  taskIds?: string[];
 };
 
 interface Props {
@@ -90,6 +92,7 @@ function extractEvaluationsPerTask(
   modelB: Model,
   metric: string,
   selectedFilters: { [key: string]: string[] },
+  selectedMetricRange?: number[],
 ) {
   // Step 1: Initiaze necessary variable
   const modelEvaluationsPerTask: { [key: string]: TaskEvaluation[] } = {};
@@ -115,8 +118,16 @@
   });
 
   // Step 3: Retain only those task which has evaluations for both models
+  // and one or more models have aggregate value in the selected range
   return Object.values(modelEvaluationsPerTask).filter(
-    (entry) => entry.length == 2,
+    (entry) =>
+      entry.length == 2 &&
+      (selectedMetricRange
+        ? (entry[0][`${metric}_agg`].value >= selectedMetricRange[0] &&
+            entry[0][`${metric}_agg`].value <= selectedMetricRange[1]) ||
+          (entry[1][`${metric}_agg`].value >= selectedMetricRange[0] &&
+            entry[1][`${metric}_agg`].value <= selectedMetricRange[1])
+        : true),
   );
 }
 
@@ -136,6 +147,7 @@
   modelB: Model,
   selectedMetric: Metric | undefined,
   selectedFilters: { [key: string]: string[] },
+  selectedMetricRange?: number[],
 ) {
   // Step 1: Initialize necessary variables
   const evaluationsPerMetricPerTask: { [key: string]: TaskEvaluation[][] } = {};
@@ -148,7 +160,9 @@
       modelB,
       selectedMetric.name,
      selectedFilters,
+      selectedMetricRange,
    );
+
    if (evaluationsPerTask.length !== 0) {
      evaluationsPerMetricPerTask[selectedMetric.name] = evaluationsPerTask;
    }
@@ -160,18 +174,25 @@
        modelB,
        metric,
        selectedFilters,
+        selectedMetricRange,
      );
      if (evaluationsPerTask.length !== 0) {
        evaluationsPerMetricPerTask[metric] = evaluationsPerTask;
      }
    });
  }
+
  // Step 3: Compute model value distribution for every metric
  const distributionA: { [key: string]: number[] } = {};
  const distributionB: { [key: string]: number[] } = {};
+  const taskIds: { [key: string]: string[] } = {};
 
  Object.keys(evaluationsPerMetricPerTask).forEach((metric) => {
    const metricValues = metrics.find((entry) => entry.name === metric)?.values;
+    taskIds[metric] = evaluationsPerMetricPerTask[metric].map(
+      (entry) => entry[0].taskId,
+    );
+
    distributionA[metric] = evaluationsPerMetricPerTask[metric].map((entry) =>
      castToNumber(
        entry[0].modelId === modelA.modelId
@@ -204,6 +225,7 @@
      meanA: meanA,
      distributionB: distributionB[metric],
      meanB: meanB,
+      taskIds: taskIds[metric],
    };
  });
 
@@ -218,22 +240,26 @@ function prepareScatterPlotData(
  distributionA: number[],
  modelB: string,
  distributionB: number[],
+  taskIds?: string[],
 ) {
  if (distributionA.length !== distributionB.length) {
    return [];
  }
 
  // Step 2: Collate model wise predictions per task
-  const distributions: number[][] = [];
+  const distributions: { values: number[]; taskId: string }[] = [];
  distributionA.forEach((valueA, index) => {
-    distributions.push([valueA, distributionB[index]]);
+    distributions.push({
+      taskId: taskIds ? taskIds[index] : `${index}`,
+      values: [valueA, distributionB[index]],
+    });
  });
 
  // Step 3: Primary sort based on model A's value
-  distributions.sort((a, b) => a[0] - b[0]);
+  distributions.sort((a, b) => a.values[0] - b.values[0]);
 
  // Step 4: Scondary sort based on Model B's value
-  distributions.sort((a, b) => a[1] - b[1]);
+  distributions.sort((a, b) => a.values[1] - b.values[1]);
 
  // Step 5: Prepare chart data
  const chartData: { [key: string]: string | number }[] = [];
@@ -242,14 +268,16 @@
    chartData.push({
      group: modelA,
      key: idx,
-      value: entry[0],
+      value: entry.values[0],
+      ...(taskIds && { taskId: entry.taskId }),
    });
 
    // Model B record
    chartData.push({
      group: modelB,
      key: idx,
-      value: entry[1],
+      value: entry.values[1],
+      ...(taskIds && { taskId: entry.taskId }),
    });
  });
 
@@ -300,6 +328,7 @@ function renderResult(
            statisticalInformationPerMetric[metric.name].distributionA,
            modelB.name,
            statisticalInformationPerMetric[metric.name].distributionB,
+            statisticalInformationPerMetric[metric.name].taskIds,
          )}
          options={{
            axes: {
@@ -316,7 +345,7 @@
                castToNumber(metric.maxValue?.value || 4, metric.values),
              ],
            }),
-            title: 'Scores',
+            title: extractMetricDisplayName(metric),
          },
          bottom: {
            mapsTo: 'key',
@@ -375,6 +404,8 @@ export default function ModelComparator({
  const [statisticalInformationPerMetric, setStatisticalInformationPerMetric] =
    useState<{ [key: string]: StatisticalInformation } | undefined>(undefined);
  const [modelColors, modelOrder] = getModelColorPalette(models);
+  const [selectedMetricRange, setSelectedMetricRange] = useState<number[]>();
+  const chartRef = useRef(null);
 
  // Step 2: Run effects
  // Step 2.a: Window resizing
@@ -411,18 +442,33 @@
    return [hMetrics, aMetrics];
  }, [metrics]);
 
-  // Step 2.d: Identify visible evaluations
+  // Step 2.d: Reset selected metric range, only applicable for numerical metrics
+  useEffect(() => {
+    if (
+      selectedMetric &&
+      selectedMetric.type === 'numerical' &&
+      selectedMetric.range
+    ) {
+      setSelectedMetricRange([
+        selectedMetric.range[0],
+        selectedMetric.range[1],
+      ]);
+    } else setSelectedMetricRange(undefined);
+  }, [selectedMetric]);
+
+  // Step 2.e: Identify visible evaluations
  const filteredEvaluations = useMemo(() => {
    if (selectedMetric) {
      // Step 1: Identify evaluations for selected models
      const evaluationsForSelectedModels = evaluationsPerMetric[
        selectedMetric.name
-      ].filter((evaluation) =>
-        (evaluation.modelId === modelA.modelId ||
-          evaluation.modelId === modelB.modelId) &&
-        !isEmpty(selectedFilters)
-          ? areObjectsIntersecting(selectedFilters, evaluation)
-          : true,
+      ].filter(
+        (evaluation) =>
+          (evaluation.modelId === modelA.modelId ||
+            evaluation.modelId === modelB.modelId) &&
+          (!isEmpty(selectedFilters)
+            ? areObjectsIntersecting(selectedFilters, evaluation)
+            : true),
      );
 
      // Step 2: Collate evaluation per task id
@@ -442,10 +488,21 @@
      });
 
      // Step 3: Only select evaluation tasks where models aggregate values differe
+      // and one or more models have aggregate value in the selected range
      const visibleEvaluationTaskIds = Object.keys(evaluationsPerTask).filter(
        (taskId) =>
          Object.keys(countBy(Object.values(evaluationsPerTask[taskId])))
-            .length > 1,
+            .length > 1 &&
+          (selectedMetricRange
+            ? (Object.values(evaluationsPerTask[taskId])[0] >=
+                selectedMetricRange[0] &&
+                Object.values(evaluationsPerTask[taskId])[0] <=
+                  selectedMetricRange[1]) ||
+              (Object.values(evaluationsPerTask[taskId])[1] >=
+                selectedMetricRange[0] &&
+                Object.values(evaluationsPerTask[taskId])[1] <=
+                  selectedMetricRange[1])
+            : true),
      );
 
      // Step 4: Return evaluations for selected evaluation tasks where models aggregate values differe
@@ -454,14 +511,20 @@
      );
    }
    return [];
-  }, [evaluationsPerMetric, selectedMetric, modelA, modelB]);
-
-  // Step 2.e: Reset statistical information, if either of model changes or filters are changed
+  }, [
+    evaluationsPerMetric,
+    selectedMetric,
+    modelA,
+    modelB,
+    selectedMetricRange,
+  ]);
+
+  // Step 2.f: Reset statistical information, if either of model changes or filters are changed
  useEffect(() => {
    setStatisticalInformationPerMetric(undefined);
  }, [modelA, modelB, selectedFilters]);
 
-  // Step 2.f: Recalculate statistical information, if metric changes
+  // Step 2.g: Recalculate statistical information, if metric changes
  useEffect(() => {
    if (
      !selectedMetric &&
@@ -476,12 +539,30 @@
          modelB,
          selectedMetric,
          selectedFilters,
+          selectedMetricRange,
+        ),
+      );
+    } else if (
+      selectedMetric &&
+      selectedMetricRange &&
+      statisticalInformationPerMetric &&
+      statisticalInformationPerMetric.hasOwnProperty(selectedMetric.name)
+    ) {
+      setStatisticalInformationPerMetric(
+        runStatisticalSignificanceTest(
+          evaluationsPerMetric,
+          metrics,
+          modelA,
+          modelB,
+          selectedMetric,
+          selectedFilters,
+          selectedMetricRange,
        ),
      );
    }
-  }, [selectedMetric]);
+  }, [selectedMetric, selectedMetricRange]);
 
-  // Step 2.f: Compute computation complexity
+  // Step 2.h: Compute computation complexity
  const complexity = useMemo(() => {
    let size = 0;
    if (selectedMetric) {
@@ -498,6 +579,31 @@
    return 'low';
  }, [evaluationsPerMetric, selectedMetric]);
 
+  // Step 2.i: Add chart event
+  useEffect(() => {
+    if (chartRef && chartRef.current) {
+      //@ts-ignore
+      chartRef.current.chart.services.events.addEventListener(
+        'scatter-click',
+        ({ detail }) => {
+          onTaskSelection(detail.datum.taskId);
+        },
+      );
+    }
+
+    return () => {
+      if (chartRef && chartRef.current) {
+        //@ts-ignore
+        chartRef.current.chart.services.events.removeEventListener(
+          'scatter-click',
+          ({ detail }) => {
+            onTaskSelection(detail.datum.taskId);
+          },
+        );
+      }
+    };
+  }, [chartRef, selectedMetric, statisticalInformationPerMetric]);
+
  // Step 3: Render
  return (
    <div className={classes.page}>
@@ -546,6 +652,50 @@
            warnText={'You must select a single metric to view tasks. '}
          />
        </div>
+        {selectedMetric &&
+        selectedMetric.type === 'numerical' &&
+        selectedMetric.range ? (
+          <div>
+            <Slider
+              ariaLabelInput="Lower bound"
+              unstable_ariaLabelInputUpper="Upper bound"
+              labelText={`${extractMetricDisplayName(selectedMetric)} Range`}
+              value={
+                selectedMetricRange
+                  ? selectedMetricRange[0]
+                  : selectedMetric.range[0]
+              }
+              unstable_valueUpper={
+                selectedMetricRange
+                  ? selectedMetricRange[1]
+                  : selectedMetric.range[1]
+              }
+              min={selectedMetric.range[0]}
+              max={selectedMetric.range[1]}
+              step={
+                selectedMetric.range.length === 3 ? selectedMetric.range[2] : 1
+              }
+              onChange={({
+                value,
+                valueUpper,
+              }: {
+                value: number;
+                valueUpper?: number;
+              }) => {
+                setSelectedMetricRange((prev) => [
+                  value,
+                  valueUpper
+                    ? valueUpper
+                    : prev
+                      ? prev[1]
+                      : selectedMetric.range
+                        ? selectedMetric.range[2]
+                        : 100,
+                ]);
+              }}
+            />
+          </div>
+        ) : null}
        <div className={classes.calculateBtn}>
          <Button
            onClick={() => {
@@ -558,6 +708,7 @@
                  modelB,
                  selectedMetric,
                  selectedFilters,
+                  selectedMetricRange,
                ),
              );
            }}
@@ -646,7 +797,7 @@
        ) ? (
          <div className={classes.row}>
            <div
-              key={'statisticalInformation-metric-' + selectedMetric.name}
+              key={`statisticalInformation-metric-${selectedMetric.name}--${hash(JSON.stringify(statisticalInformationPerMetric[selectedMetric.name]))}`}
              className={classes.performanceInformation}
            >
              <h5>
@@ -678,6 +829,7 @@
              </div>
            </Tile>
            <ScatterChart
+              ref={chartRef}
              data={prepareScatterPlotData(
                modelA.name,
                statisticalInformationPerMetric[selectedMetric.name]
@@ -685,6 +837,8 @@
                modelB.name,
                statisticalInformationPerMetric[selectedMetric.name]
                  .distributionB,
+                statisticalInformationPerMetric[selectedMetric.name]
+                  .taskIds,
              )}
              options={{
                axes: {
@@ -710,7 +864,7 @@
                    ),
                  ],
                }),
-                title: 'Scores',
+                title: extractMetricDisplayName(selectedMetric),
              },
              bottom: {
                mapsTo: 'key',
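
Note on the second bullet: the range restriction added above reduces to one predicate, applied both in extractEvaluationsPerTask and over evaluationsPerTask in the filteredEvaluations memo: keep a task when at least one of the two models' aggregate values falls inside the selected bounds. A standalone sketch (the helper name is hypothetical; the component inlines this logic):

// Hypothetical helper mirroring the inlined range check: with no range
// selected everything passes; otherwise a task survives when at least one
// model's aggregate value lies within [lower, upper].
function isInSelectedRange(
  values: number[],
  selectedMetricRange?: number[],
): boolean {
  if (!selectedMetricRange) return true;
  const [lower, upper] = selectedMetricRange;
  return values.some((value) => value >= lower && value <= upper);
}

// With a slider range of [2, 4]: a task scored 1 by model A and 3 by
// model B is kept (3 is in range); one scored 0 and 1 is dropped.
console.log(isInSelectedRange([1, 3], [2, 4])); // true
console.log(isInSelectedRange([0, 1], [2, 4])); // false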
src/views/tasks-table/TasksTable.module.scss CHANGED
@@ -53,6 +53,7 @@
 .link {
   color: $blue-60;
   text-decoration: underline;
+  cursor: pointer;
 }
 
 .taskCell {