Merge branch 'chore/documentation' docs first draft
This view is limited to 50 files because it contains too many changes.
- README.md +1 -1
- app.py +0 -1
- dev/call_hf_batch.py +94 -0
- docs/dev_notes.md +40 -0
- docs/fix_tabrender.md +5 -0
- docs/input_handling.md +8 -0
- docs/main.md +10 -0
- docs/obs_map.md +7 -0
- docs/st_logs.md +7 -0
- docs/whale_gallery.md +4 -0
- docs/whale_viewer.md +4 -0
- images/references/640x427-atlantic-white-sided-dolphin.jpg +0 -3
- images/references/640x427-long-finned-pilot-whale.webp +0 -3
- images/references/640x427-southern-right-whale.jpg +0 -3
- images/references/Humpback.webp +0 -3
- images/references/Whale_Short-Finned_Pilot-markedDW.png +0 -3
- images/references/beluga.webp +0 -3
- images/references/blue-whale.webp +0 -3
- images/references/bottlenose_dolphin.webp +0 -3
- images/references/brydes.webp +0 -3
- images/references/common_dolphin.webp +0 -3
- images/references/cuviers_beaked_whale.webp +0 -3
- images/references/false-killer-whale.webp +0 -3
- images/references/fin-whale.webp +0 -3
- images/references/gray-whale.webp +0 -3
- images/references/killer_whale.webp +0 -3
- images/references/melon.webp +0 -3
- images/references/minke-whale.webp +0 -3
- images/references/pantropical-spotted-dolphin.webp +0 -3
- images/references/pygmy-killer-whale.webp +0 -3
- images/references/rough-toothed-dolphin.webp +0 -3
- images/references/sei.webp +0 -3
- images/references/spinner.webp +0 -3
- mkdocs.yaml +20 -3
- {call_models → snippets}/click_map.py +0 -0
- {call_models → snippets}/d_entry.py +0 -0
- snippets/extract_meta.py +1 -1
- {call_models → snippets}/hotdogs.py +0 -0
- {call_models → snippets}/imgs/cakes.jpg +0 -0
- {call_models → snippets}/test_upload.py +0 -0
- {call_models → src}/alps_map.py +0 -0
- {call_models → src}/entry_and_hotdog.py +49 -12
- {call_models → src}/fix_tabrender.py +37 -1
- {call_models → src}/images/references/640x427-atlantic-white-sided-dolphin.jpg +0 -0
- {call_models → src}/images/references/640x427-long-finned-pilot-whale.webp +0 -0
- {call_models → src}/images/references/640x427-southern-right-whale.jpg +0 -0
- {call_models → src}/images/references/Humpback.webp +0 -0
- {call_models → src}/images/references/Whale_Short-Finned_Pilot-markedDW.png +0 -0
- {call_models → src}/images/references/beluga.webp +0 -0
- {call_models → src}/images/references/blue-whale.webp +0 -0
README.md
CHANGED
@@ -6,7 +6,7 @@ colorTo: red
 sdk: streamlit
 sdk_version: 1.39.0
 python_version: "3.10"
-app_file:
+app_file: src/main.py
 pinned: false
 license: apache-2.0
 short_description: 'SDSC Hackathon - Project 10. '
app.py
DELETED
@@ -1 +0,0 @@
-call_models/entry_and_hotdog.py
dev/call_hf_batch.py
ADDED
@@ -0,0 +1,94 @@
+import os
+from huggingface_hub import HfApi
+import cv2
+from pathlib import Path
+import pandas as pd
+
+from transformers import pipeline
+from transformers import AutoModelForImageClassification
+import time
+
+'''
+how to use this script:
+1. get data from the kaggle competition, including images and the train.csv file
+   edit the "base" variable, assuming the following layout
+
+   ceteans/
+   ├── images
+   │   ├── 00021adfb725ed.jpg
+   │   ├── 000562241d384d.jpg
+   │   └── ...
+   └── train.csv
+
+2. inspect the df_results dataframe to see how the model is performing
+
+
+'''
+# setup for the ML model on huggingface (our wrapper)
+os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+rev = 'main'
+
+# load the model
+cetacean_classifier = AutoModelForImageClassification.from_pretrained(
+    "Saving-Willy/cetacean-classifier",
+    revision=rev,
+    trust_remote_code=True)
+
+# get ready to load images
+base = Path('~/Documents/ceteans/').expanduser()
+df = pd.read_csv(base / 'train.csv')
+
+i_max = 100  # put a limit on the number of images to classify in this test (or None)
+
+# for each file in the folder base/images: 1/ load image, 2/ classify, 3/ compare against the relevant row in df
+# also keep track of the time it takes to classify each image
+
+
+classifications = []
+
+img_pth = base / 'images'
+img_files = list(img_pth.glob('*.jpg'))
+
+
+for i, img_file in enumerate(img_files):
+    # lets check we can get the right target.
+    img_id = img_file.name  # includes .jpg
+    target = df.loc[df['image'] == img_id, 'species'].item()
+    #print(img_id, target)
+
+    start_time = time.time()
+    image = cv2.imread(str(img_file))
+    load_time = time.time() - start_time
+
+    start_time = time.time()
+    out = cetacean_classifier(image)  # get top 3 matches
+    classify_time = time.time() - start_time
+
+    whale_prediction1 = out['predictions'][0]
+
+    # comparison
+    ok = whale_prediction1 == target
+    any = target in [x for x in out['predictions']]
+    row = [img_id, target, ok, any, load_time, classify_time] + list(out['predictions'])
+
+    print(i, row)
+
+    classifications.append(row)
+
+    if i_max is not None and i >= i_max:
+        break
+
+
+df_results = pd.DataFrame(classifications, columns=['img_id', 'target', 'ok', 'any', 'load_time', 'classify_time'] + [f'pred_{i}' for i in range(3)])
+
+# print out a few summary stats
+# mean time to load and classify (formatted 3dp), +- std dev (formatted to 2dp),
+print(f"Mean load time: {df_results['load_time'].mean():.3f} +- {df_results['load_time'].std():.2f} s")
+print(f"Mean classify time: {df_results['classify_time'].mean():.3f} +- {df_results['classify_time'].std():.2f} s")
+
+# accuracy: count of ok / count of any
+print(f"Accuracy: correct with top prediction: {df_results['ok'].sum()} | any of top 3 correct: {df_results['any'].sum():.3f} (of total {df_results.shape[0]})")
+
+# diversity: is the model just predicting one class for everything it sees?
+print("Which classes are predicted?")
+print(df_results.pred_0.value_counts())
docs/dev_notes.md
ADDED
@@ -0,0 +1,40 @@
+# How to run the UI
+
+We set this up so it is hosted as a huggingface space. Each commit to `main` triggers a push and a rebuild on their servers.
+
+For local testing, assuming you have all the required packages installed in a
+conda env or virtualenv, and that env is activated:
+
+```
+cd src
+streamlit run main.py
+```
+Then use a web browser to view the site indicated, by default: http://localhost:8501
+
+# How to build and view docs locally
+
+We have a CI action to present the docs on github.io.
+To validate locally, you need the deps listed in `requirements.txt` installed.
+
+Run
+```
+mkdocs serve
+```
+And navigate to the server running locally, by default: http://127.0.0.1:8888/
+
+This automatically watches for changes in the markdown files, but if you edit
+something else, like the docstrings in the py files, triggering a rebuild in another terminal
+refreshes the site, without having to quit and restart the server.
+```
+mkdocs build -c
+```
+
+
+
+# Set up a venv
+
+(standard stuff)
+
+# Set up a conda env
+
+(Standard stuff)
docs/fix_tabrender.md
ADDED
@@ -0,0 +1,5 @@
+A js fix for certain UI elements, including maps, getting rendered into a
+zero-sized frame by default. Here we resize it so it is visible once the tab is
+clicked and no further interaction is required to see it.
+
+::: src.fix_tabrender
docs/input_handling.md
ADDED
@@ -0,0 +1,8 @@
+This module focuses on image and metadata entry:
+
+- UI elements to upload an image and populate the metadata (or edit the
+  auto-discovered metadata)
+- a container class for an observation
+
+
+::: src.input_handling
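
For orientation, a minimal sketch of the kind of flow this module covers: an upload widget feeding a small observation container. The `Observation` class and its fields below are illustrative placeholders, not the repo's actual API.

```python
from dataclasses import dataclass
from typing import Optional

import streamlit as st

@dataclass
class Observation:
    # illustrative container only; the real class lives in src.input_handling
    image_name: str
    latitude: Optional[float] = None
    longitude: Optional[float] = None
    author_email: str = ""

uploaded = st.sidebar.file_uploader("Upload a cetacean image", type=["jpg", "png", "webp"])
if uploaded is not None:
    obs = Observation(image_name=uploaded.name)
    # auto-discovered metadata (e.g. EXIF GPS) could pre-fill these; the user can edit them
    obs.latitude = st.sidebar.number_input("Latitude", value=0.0)
    obs.longitude = st.sidebar.number_input("Longitude", value=0.0)
```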
docs/main.md
ADDED
@@ -0,0 +1,10 @@
+# Main entry point
+
+This module sets up the streamlit UI frontend,
+as well as logger and session state elements in the backend.
+
+The session state is used to retain values from one interaction to the next, since the streamlit execution model is to re-run the entire script top-to-bottom upon each user interaction (e.g. click).
+See streamlit [docs](https://docs.streamlit.io/develop/api-reference/caching-and-state/st.session_state).
+
+
+::: src.entry_and_hotdog
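
Since streamlit re-runs the whole script on each interaction, anything that must persist is kept in `st.session_state`. A minimal, self-contained sketch of the pattern (the `counter` key is illustrative, not taken from this app):

```python
import streamlit as st

# initialise once; the value survives the re-run triggered by each interaction
if "counter" not in st.session_state:
    st.session_state.counter = 0

if st.button("Click me"):
    st.session_state.counter += 1  # retained across re-runs

st.write(f"Button pressed {st.session_state.counter} times")
```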
docs/obs_map.md
ADDED
@@ -0,0 +1,7 @@
+This module provides rendering of observations on an interactive map, with a variety of tilesets available.
+
+Note: OSM, ESRI, and CartoDB map tiles are served without authentication/tokens,
+and so render correctly on the huggingface deployment. The Stamen tiles render
+on localhost but require a token when presented on a 3rd-party site.
+
+::: src.obs_map
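
To illustrate the tileset note above, a hedged folium/streamlit sketch using token-free providers; the coordinates and layer names are placeholders (and may vary with the folium version), not taken from `src.obs_map`:

```python
import folium
import streamlit as st
from streamlit_folium import st_folium

# token-free base layers render both locally and on the huggingface space
m = folium.Map(location=[46.5, 8.0], zoom_start=5, tiles="OpenStreetMap")
folium.TileLayer("CartoDB positron", name="CartoDB Positron").add_to(m)
folium.LayerControl().add_to(m)

# one example observation marker
folium.Marker([46.5, 8.0], popup="example observation").add_to(m)

st_folium(m, width=700)
```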
docs/st_logs.md
ADDED
@@ -0,0 +1,7 @@
+This module provides utilities to incorporate a standard python logger within streamlit.
+
+
+# Streamlit log handler
+
+::: src.st_logs
+
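
A minimal sketch of the general idea behind such a handler: buffer log records in memory so a streamlit element can render them. The class and names below are illustrative, not the actual `src.st_logs` implementation:

```python
import logging
from collections import deque

class BufferLogHandler(logging.Handler):
    """Keep the last N formatted log records in memory for display in a UI."""

    def __init__(self, maxlen: int = 100):
        super().__init__()
        self.buffer = deque(maxlen=maxlen)

    def emit(self, record: logging.LogRecord) -> None:
        self.buffer.append(self.format(record))

logger = logging.getLogger("app")
handler = BufferLogHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info("App started.")
# in streamlit, the buffered entries could be shown with e.g. st.dataframe(list(handler.buffer))
```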
docs/whale_gallery.md
ADDED
@@ -0,0 +1,4 @@
+This module provides a gallery of the whales and dolphins that the classifier
+is trained on. It displays the images and links to further info on the species.
+
+::: src.whale_gallery
docs/whale_viewer.md
ADDED
@@ -0,0 +1,4 @@
+This module provides a streamlit rendering for the whales and dolphins that the classifier is aware of, and also holds the
+metadata for them (images, class names that the classifier uses, and URLs for further information about each species).
+
+::: src.whale_viewer
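
As a hedged illustration of the shape such metadata can take (a classifier class name mapped to a display name, reference image, and info URL); the entries below are examples, not the module's actual data:

```python
# illustrative only: maps a classifier class name to display metadata
WHALE_METADATA = {
    "humpback_whale": {
        "display_name": "Humpback whale",
        "image": "images/references/Humpback.webp",
        "url": "https://en.wikipedia.org/wiki/Humpback_whale",
    },
    "blue_whale": {
        "display_name": "Blue whale",
        "image": "images/references/blue-whale.webp",
        "url": "https://en.wikipedia.org/wiki/Blue_whale",
    },
}
```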
images/references/640x427-atlantic-white-sided-dolphin.jpg
DELETED (Git LFS file)

images/references/640x427-long-finned-pilot-whale.webp
DELETED (Git LFS file)

images/references/640x427-southern-right-whale.jpg
DELETED (Git LFS file)

images/references/Humpback.webp
DELETED (Git LFS file)

images/references/Whale_Short-Finned_Pilot-markedDW.png
DELETED (Git LFS file)

images/references/beluga.webp
DELETED (Git LFS file)

images/references/blue-whale.webp
DELETED (Git LFS file)

images/references/bottlenose_dolphin.webp
DELETED (Git LFS file)

images/references/brydes.webp
DELETED (Git LFS file)

images/references/common_dolphin.webp
DELETED (Git LFS file)

images/references/cuviers_beaked_whale.webp
DELETED (Git LFS file)

images/references/false-killer-whale.webp
DELETED (Git LFS file)

images/references/fin-whale.webp
DELETED (Git LFS file)

images/references/gray-whale.webp
DELETED (Git LFS file)

images/references/killer_whale.webp
DELETED (Git LFS file)

images/references/melon.webp
DELETED (Git LFS file)

images/references/minke-whale.webp
DELETED (Git LFS file)

images/references/pantropical-spotted-dolphin.webp
DELETED (Git LFS file)

images/references/pygmy-killer-whale.webp
DELETED (Git LFS file)

images/references/rough-toothed-dolphin.webp
DELETED (Git LFS file)

images/references/sei.webp
DELETED (Git LFS file)

images/references/spinner.webp
DELETED (Git LFS file)
mkdocs.yaml
CHANGED
@@ -16,10 +16,27 @@ plugins:
   - mkdocstrings:
       default_handler: python
       handlers:
-        python:
-          paths: [
+        python:
+          paths: [nonexistent_path_on_purpose]
 
 
 nav:
   - README: index.md
-
+  #- Quickstart:
+    #- Installation: installation.md
+    #- Usage: usage.md
+  - API:
+    - Main app: main.md
+    - Modules:
+      - Data entry handling: input_handling.md
+      - Map of observations: obs_map.md
+      - Whale gallery: whale_gallery.md
+      - Whale viewer: whale_viewer.md
+      - Logging: st_logs.md
+      - Tab-rendering fix (js): fix_tabrender.md
+
+  - Development clutter:
+    - Demo app: app.md
+
+  - How to contribute:
+    - Dev Notes: dev_notes.md
{call_models → snippets}/click_map.py
RENAMED
File without changes

{call_models → snippets}/d_entry.py
RENAMED
File without changes
snippets/extract_meta.py
CHANGED
@@ -38,7 +38,7 @@ def extract_gps(image_path):
 
     return (lat, lon)
 # Example usage
-image_path = '
+image_path = 'imgs/cakes.jpg' # this file has good exif data, inc GPS, timestamps etc.
 datetime_info = extract_datetime(image_path)
 gps_info = extract_gps(image_path)
 print(f'Date and Time: {datetime_info}')
{call_models → snippets}/hotdogs.py
RENAMED
File without changes

{call_models → snippets}/imgs/cakes.jpg
RENAMED
File without changes

{call_models → snippets}/test_upload.py
RENAMED
File without changes

{call_models → src}/alps_map.py
RENAMED
File without changes
{call_models → src}/entry_and_hotdog.py
RENAMED
@@ -1,27 +1,27 @@
-import datetime
-import os
+#import datetime
 import json
 import logging
+import os
 import tempfile
+
 import pandas as pd
 import streamlit as st
+from streamlit.delta_generator import DeltaGenerator # for type hinting
 import folium
 from streamlit_folium import st_folium
 from huggingface_hub import HfApi
-
-
+from transformers import pipeline
+from transformers import AutoModelForImageClassification
 
-import whale_viewer as sw_wv
-import input_handling as sw_inp
 import alps_map as sw_am
-import
+import input_handling as sw_inp
 import obs_map as sw_map
 import st_logs as sw_logs
+import whale_gallery as sw_wg
+import whale_viewer as sw_wv
 
 
 
-from transformers import pipeline
-from transformers import AutoModelForImageClassification
 
 # setup for the ML model on huggingface (our wrapper)
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@@ -64,14 +64,29 @@ if "tab_log" not in st.session_state:
     st.session_state.tab_log = None
 
 
-def metadata2md():
+def metadata2md() -> str:
+    """Get metadata from cache and return as markdown-formatted key-value list
+
+    Returns:
+        str: Markdown-formatted key-value list of metadata
+
+    """
     markdown_str = "\n"
     for key, value in st.session_state.full_data.items():
        markdown_str += f"- **{key}**: {value}\n"
     return markdown_str
 
 
-def push_observation(tab_log=None):
+def push_observation(tab_log:DeltaGenerator=None):
+    """
+    Push the observation to the Hugging Face dataset
+
+    Args:
+        tab_log (streamlit.container): The container to log messages to. If not provided,
+            log messages are in any case written to the global logger (TODO: test - didn't
+            push any data since generating the logger)
+
+    """
     # we get the data from session state: 1 is the dict 2 is the image.
     # first, lets do an info display (popup)
     metadata_str = json.dumps(st.session_state.full_data)
@@ -105,7 +120,26 @@ def push_observation(tab_log=None):
     st.info(msg)
 
 
-
+
+def main() -> None:
+    """
+    Main entry point to set up the streamlit UI and run the application.
+
+    The organisation is as follows:
+
+    1. data input (a new observation) is handled in the sidebar
+    2. the rest of the interface is organised in tabs:
+
+        - cetacean classifier
+        - hotdog classifier
+        - map to present the observations
+        - table of recent log entries
+        - gallery of whale images
+
+    The majority of the tabs are instantiated from modules. Currently the two
+    classifiers are still in-line here.
+
+    """
 
     g_logger.info("App started.")
     g_logger.warning(f"[D] Streamlit version: {st.__version__}. Python version: {os.sys.version}")
@@ -306,3 +340,6 @@ if __name__ == "__main__":
     tab_hotdogs.write(f"Session Data: {json.dumps(st.session_state.full_data)}")
 
 
+
+if __name__ == "__main__":
+    main()
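
For context on what `push_observation` does conceptually, a hedged sketch of uploading a serialised observation to a Hugging Face dataset repo with `huggingface_hub`; the repo id and paths are placeholders, not the ones used by this app:

```python
import json
import tempfile

from huggingface_hub import HfApi

def push_metadata(metadata: dict, repo_id: str = "some-org/some-dataset") -> None:
    """Serialise observation metadata and upload it to a dataset repo (illustrative)."""
    api = HfApi()  # assumes a token is available, e.g. via `huggingface-cli login` or HF_TOKEN
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(metadata, f)
        tmp_path = f.name
    api.upload_file(
        path_or_fileobj=tmp_path,
        path_in_repo="metadata/observation.json",
        repo_id=repo_id,
        repo_type="dataset",
    )
```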
{call_models → src}/fix_tabrender.py
RENAMED
@@ -10,6 +10,21 @@ import streamlit as st
 import uuid, html
 # workaround for streamlit making tabs height 0 when not active, breaks map
 def inject_iframe_js_code(source: str) -> None:
+    """
+    Injects JavaScript code into a Streamlit app using an iframe.
+
+    This function creates a hidden div with a unique ID and injects the provided
+    JavaScript code into the parent document using an iframe. The iframe's source
+    is a JavaScript URL that creates a script element, sets its type to 'text/javascript',
+    and assigns the provided JavaScript code to its text content. The script element
+    is then appended to the hidden div in the parent document.
+
+    Args:
+        source (str): The JavaScript code to be injected.
+
+    Returns:
+        None
+    """
     div_id = uuid.uuid4()
 
     st.markdown(
@@ -28,7 +43,28 @@ def inject_iframe_js_code(source: str) -> None:
         unsafe_allow_html=True,
     )
 
-def js_show_zeroheight_iframe(component_iframe_title: str, height: str = "auto"):
+def js_show_zeroheight_iframe(component_iframe_title: str, height: str = "auto") -> None:
+    """
+    Injects JavaScript code to dynamically set iframe height (located by title)
+
+    This function generates and injects JavaScript code that searches for
+    iframes with the given title and sets their height to the specified value.
+    The script attempts to find the iframes up to a maximum number of attempts,
+    and also listens for user interactions to reattempt setting the height.
+
+    See https://github.com/streamlit/streamlit/issues/7376
+
+
+    Args:
+        component_iframe_title (str): The title attribute of the iframes to target.
+        height (str, optional): The height to set for the iframes. Defaults to "auto".
+
+    Notes:
+        - The JavaScript code will attempt to find the iframes every 250
+          milliseconds, up to a maximum of 20 attempts.
+        - If the iframes are found, their height will be set to the specified value.
+        - User interactions (e.g., click events) trigger a reattempt to set the height.
+    """
     source = f"""
     (function() {{
         var attempts = 0;
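
A hedged usage sketch for `js_show_zeroheight_iframe`: after rendering a folium map inside a tab, the fix is applied by matching the component iframe's title. The title string below is an assumption, not verified against streamlit_folium:

```python
import folium
import streamlit as st
from streamlit_folium import st_folium
from fix_tabrender import js_show_zeroheight_iframe

tab_map, tab_other = st.tabs(["Map", "Other"])

with tab_map:
    m = folium.Map(location=[46.5, 8.0], zoom_start=5)
    st_folium(m, width=700)

# resize the zero-height iframe once its tab becomes visible;
# the title value is an assumption about how streamlit_folium labels its iframe
js_show_zeroheight_iframe(component_iframe_title="streamlit_folium.st_folium", height="500px")
```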
{call_models → src}/images/references/640x427-atlantic-white-sided-dolphin.jpg
RENAMED
File without changes

{call_models → src}/images/references/640x427-long-finned-pilot-whale.webp
RENAMED
File without changes

{call_models → src}/images/references/640x427-southern-right-whale.jpg
RENAMED
File without changes

{call_models → src}/images/references/Humpback.webp
RENAMED
File without changes

{call_models → src}/images/references/Whale_Short-Finned_Pilot-markedDW.png
RENAMED
File without changes

{call_models → src}/images/references/beluga.webp
RENAMED
File without changes

{call_models → src}/images/references/blue-whale.webp
RENAMED
File without changes