Saghir committed b5401fe (1 parent: 165fa9a)

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -27,13 +27,13 @@ st.sidebar.markdown(
 
 
 default_image_url_compare = "images/HistRotate.png"
-st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=350)
+st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=300)
 
 default_image_url_compare = "images/FigPathDino_parameters_FLOPs_compare.png"
-st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=350)
+st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=300)
 
 default_image_url_compare = "images/ActivationMap.png"
-st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=350)
+st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=300)
 
 
 st.sidebar.markdown("### Citation")
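For reference, a minimal sketch of the affected sidebar block after this change, with all three images rendered at the new 300 px width. This is not the full app.py: captions are abbreviated, and it assumes Streamlit is installed and the files under images/ exist.

```python
import streamlit as st

# Sidebar figures now rendered at width=300 (previously 350).
# Captions are abbreviated here; the full captions are in app.py.
sidebar_figures = [
    ("images/HistRotate.png",
     "360-degree rotation augmentation for histopathology patches."),
    ("images/FigPathDino_parameters_FLOPs_compare.png",
     "PathDino vs. counterparts: parameters vs. MV@5 on CAMELYON16 (bubble size = FLOPs)."),
    ("images/ActivationMap.png",
     "Attention visualization: PathDino vs. HIPT-small and DinoSSLPath."),
]

for path, caption in sidebar_figures:
    st.sidebar.image(path, caption=caption, width=300)

st.sidebar.markdown("### Citation")
```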