Update app.py
app.py CHANGED
@@ -16,23 +16,33 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 # Load PathDino model and image transforms
 model, image_transforms = get_pathDino_model("PathDino512.pth")
 
+# Increase the width of the sidebar
+st.write(
+    f"""
+    <style>
+    .sidebar .sidebar-content {{
+        width: 420px;
+    }}
+    </style>
+    """
+)
 
 st.sidebar.markdown("### PathDino")
 st.sidebar.markdown(
-    "PathDino is a lightweight
-    "PathDino is a customized ViT architecture, finely tuned to the nuances of
+    "PathDino is a lightweight Histopathology transformer consisting of just five small vision transformer blocks. "
+    "PathDino is a customized ViT architecture, finely tuned to the nuances of histology images. It not only exhibits "
     "superior performance but also effectively reduces susceptibility to overfitting, a common challenge in histology "
     "image analysis.\n\n"
 )
 
 default_image_url_compare = "images/HistRotate.png"
-st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=
+st.sidebar.image(default_image_url_compare, caption='A 360 rotation augmentation for training models on histopathology images. Unlike training on natural images where the rotation may change the context of the visual data, rotating a histopathology patch does not change the context and it improves the learning process for better reliable embedding learning.', width=300)
 
 default_image_url_compare = "images/FigPathDino_parameters_FLOPs_compare.png"
-st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=
+st.sidebar.image(default_image_url_compare, caption='PathDino Vs its counterparts. Number of Parameters (Millions) vs the patch-level retrieval with macro avg F-score of majority vote (MV@5) on CAMELYON16 dataset. The bubble size represents the FLOPs.', width=300)
 
 default_image_url_compare = "images/ActivationMap.png"
-st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=
+st.sidebar.image(default_image_url_compare, caption='Attention Visualization. When visualizing attention patterns, our PathDino transformer outperforms HIPT-small and DinoSSLPath, despite being trained on a smaller dataset of 6 million TCGA patches. In contrast, DinoSSLPath and HIPT were trained on much larger datasets, with 19 million and 104 million TCGA patches, respectively.', width=300)
 
 
 
@@ -67,7 +77,7 @@ def generate_activation_maps(image):
     return attention_list
 
 # Streamlit UI
-st.title("PathDino - Compact ViT for
+st.title("PathDino - Compact ViT for Histopathology Image Analysis")
 st.write("Upload a histology image to view the activation maps.")
 
 # uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])