import os
import base64
import tempfile
import cv2
import streamlit as st
import streamlit.components.v1 as components
import PIL
import requests
from ultralytics import YOLO
import time
import numpy as np
import imageio_ffmpeg as ffmpeg

# Page config first
st.set_page_config(
    page_title="WildfireWatch: AI Detection",
    page_icon="🔥",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Model path
model_path = 'https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/best.pt'

# Session state initialization
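# processed_frames holds annotated frames, processed_video the encoded MP4 bytes,
# start_time the processing start used for ETA estimates; slider_value is initialized but unused here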
defaults = {"processed_frames": [], "slider_value": 0, "processed_video": None, "start_time": None}
for key, value in defaults.items():
    if key not in st.session_state:
        st.session_state[key] = value

# Sidebar
with st.sidebar:
    st.header("Upload & Settings")
    source_file = st.file_uploader("Upload image/video", type=["jpg", "jpeg", "png", "bmp", "webp", "mp4"])
    confidence = float(st.slider("Confidence Threshold", 25, 100, 40)) / 100
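    # Map each label to a target output frame rate in frames per second; None keeps the source rate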
    fps_options = {
        "Original FPS": None,
        "3 FPS": 3,
        "1 FPS": 1,
        "1 frame/4s": 0.25,
        "1 frame/10s": 0.1,
        "1 frame/15s": 0.0667,
        "1 frame/30s": 0.0333
    }
    video_option = st.selectbox("Output Frame Rate", list(fps_options.keys()))
    process_button = st.button("Detect Wildfire")
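    # Progress widgets and an empty slot for the download button, filled in during processing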
    progress_bar = st.progress(0)
    progress_text = st.empty()
    download_slot = st.empty()

# Main page
st.title("WildfireWatch: AI-Powered Detection")
col1, col2 = st.columns(2)
with col1:
    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_1.jpeg", use_column_width=True)
with col2:
    st.image("https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/Fire_3.png", use_column_width=True)

st.markdown("""
Early wildfire detection using the YOLOv8 vision model. See the examples below or upload your own image or video.
""")

# Function to create synchronized video pair HTML
def create_synced_video_pair(orig_url, proc_url, pair_id):
    # Download each video and embed it as a base64 data URI so the HTML is self-contained
    orig_b64 = base64.b64encode(requests.get(orig_url).content).decode()
    proc_b64 = base64.b64encode(requests.get(proc_url).content).decode()
    html = f"""
    <div style="display: flex; justify-content: space-between;">
        <div style="width: 48%;">
            <h4>Original</h4>
            <video id="orig_{pair_id}" width="100%" controls>
                <source src="data:video/mp4;base64,{orig_b64}" type="video/mp4">
            </video>
        </div>
        <div style="width: 48%;">
            <h4>Processed</h4>
            <video id="proc_{pair_id}" width="100%" controls>
                <source src="data:video/mp4;base64,{proc_b64}" type="video/mp4">
            </video>
        </div>
    </div>
    <script>
        const origVideo_{pair_id} = document.getElementById('orig_{pair_id}');
        const procVideo_{pair_id} = document.getElementById('proc_{pair_id}');
        
        origVideo_{pair_id}.addEventListener('play', function() {{
            procVideo_{pair_id}.currentTime = origVideo_{pair_id}.currentTime;
            procVideo_{pair_id}.play();
        }});
        procVideo_{pair_id}.addEventListener('play', function() {{
            origVideo_{pair_id}.currentTime = procVideo_{pair_id}.currentTime;
            origVideo_{pair_id}.play();
        }});
        
        origVideo_{pair_id}.addEventListener('pause', function() {{
            procVideo_{pair_id}.pause();
        }});
        procVideo_{pair_id}.addEventListener('pause', function() {{
            origVideo_{pair_id}.pause();
        }});
        
        origVideo_{pair_id}.addEventListener('seeked', function() {{
            procVideo_{pair_id}.currentTime = origVideo_{pair_id}.currentTime;
        }});
        procVideo_{pair_id}.addEventListener('seeked', function() {{
            origVideo_{pair_id}.currentTime = procVideo_{pair_id}.currentTime;
        }});
    </script>
    """
    return html

# Example videos with synchronization
st.header("Example Results")
examples = [
    ("T Example", "T1.mp4", "T2.mpg"),
    ("LA Example", "LA1.mp4", "LA2.mp4")
]
for title, orig_file, proc_file in examples:
    st.subheader(title)
    orig_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{orig_file}"
    proc_url = f"https://huggingface.co/spaces/tstone87/ccr-colorado/resolve/main/{proc_file}"
    pair_id = title.replace(" ", "").lower()
    video_html = create_synced_video_pair(orig_url, proc_url, pair_id)
    # st.markdown does not execute <script> tags, so render the synced pair in an HTML component
    # (the fixed height is an assumed value; adjust it to fit the videos)
    components.html(video_html, height=420)

st.header("Your Results")
result_cols = st.columns(2)
viewer_slot = st.empty()

# Load model
try:
    model = YOLO(model_path)
except Exception as ex:
    st.error(f"Model loading failed: {str(ex)}")
    model = None

# Processing
if process_button and source_file and model:
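    # Clear frames from any previous run before processing the new upload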
    st.session_state.processed_frames = []
    if source_file.type.split('/')[0] == 'image':
        image = PIL.Image.open(source_file)
        res = model.predict(image, conf=confidence)
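        # plot() returns an annotated BGR array; reversing the channel order gives RGB for st.image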
        result = res[0].plot()[:, :, ::-1]
        with result_cols[0]:
            st.image(image, caption="Original", use_column_width=True)
        with result_cols[1]:
            st.image(result, caption="Detected", use_column_width=True)
    else:
        # Video processing
        # Write the upload to a temporary file and close it before OpenCV opens it,
        # so the buffered bytes are fully flushed to disk
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            tmp.write(source_file.read())
        vidcap = cv2.VideoCapture(tmp.name)
        
        orig_fps = vidcap.get(cv2.CAP_PROP_FPS)
        total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        
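        # Derive the output frame rate and how many source frames to skip between detections
        # (e.g. a 30 FPS source at "1 FPS" keeps every 30th frame)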
        output_fps = fps_options[video_option] or orig_fps
        sample_interval = max(1, int(orig_fps / output_fps)) if output_fps else 1
        
        st.session_state.start_time = time.time()
        frame_count = 0
        processed_count = 0
        
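        # Decode frames sequentially, running detection only on every sample_interval-th frame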
        success, frame = vidcap.read()
        while success:
            if frame_count % sample_interval == 0:
                res = model.predict(frame, conf=confidence)
                processed_frame = res[0].plot()[:, :, ::-1]
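                # The channel flip yields a non-contiguous view; the video writer needs contiguous data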
                if not processed_frame.flags['C_CONTIGUOUS']:
                    processed_frame = np.ascontiguousarray(processed_frame)
                st.session_state.processed_frames.append(processed_frame)
                
                processed_count += 1
                elapsed = time.time() - st.session_state.start_time
                progress = frame_count / total_frames
                
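                # Rough ETA: average time per processed frame times the frames still to be sampled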
                if elapsed > 0 and processed_count > 0:
                    time_per_frame = elapsed / processed_count
                    frames_left = (total_frames - frame_count) / sample_interval
                    eta = frames_left * time_per_frame
                    eta_str = f"{int(eta // 60)}m {int(eta % 60)}s"
                else:
                    eta_str = "Calculating..."
                
                progress_bar.progress(min(progress, 1.0))
                progress_text.text(f"Progress: {progress:.1%} | ETA: {eta_str}")
            
            frame_count += 1
            success, frame = vidcap.read()
        
        vidcap.release()
        os.unlink(tmp.name)
        
        if st.session_state.processed_frames:
            out_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
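            # write_frames returns a generator-based writer that streams raw frames to an ffmpeg
            # subprocess; it must be primed with send(None) before frames are sent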
            writer = ffmpeg.write_frames(
                out_path,
                (width, height),
                fps=output_fps or orig_fps,
                codec='libx264',
                pix_fmt_in='rgb24',  # frames were flipped from BGR to RGB above
                pix_fmt_out='yuv420p'
            )
            writer.send(None)  # Initialize writer
            
            for frame in st.session_state.processed_frames:
                writer.send(frame)
            writer.close()
            
            with open(out_path, 'rb') as f:
                st.session_state.processed_video = f.read()
            os.unlink(out_path)
            
            progress_bar.progress(1.0)
            progress_text.text("Processing complete!")
            with result_cols[0]:
                st.video(source_file)
            with result_cols[1]:
                st.video(st.session_state.processed_video)
            download_slot.download_button(
                label="Download Processed Video",
                data=st.session_state.processed_video,
                file_name="processed_wildfire.mp4",
                mime="video/mp4"
            )

if not source_file:
    st.info("Please upload a file to begin.")