import streamlit as st
import torch
import bitsandbytes
import accelerate
import scipy
import copy
from PIL import Image
import torch.nn as nn
from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.gen_utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model



def answer_question(caption, detected_objects_str, question, model):
    """Generate an answer to the question from the caption and detected objects."""
    answer = model.generate_answer(question, caption, detected_objects_str)
    return answer

# Lightweight local placeholders; note that these shadow the get_caption and
# free_gpu_resources imported from my_model above.
def get_caption(image):
    return "Generated caption for the image"

def free_gpu_resources():
    pass

# Paths to the sample images offered as clickable thumbnails
sample_images = ["Files/sample1.jpg", "Files/sample2.jpg", "Files/sample3.jpg", 
                 "Files/sample4.jpg", "Files/sample5.jpg", "Files/sample6.jpg", 
                 "Files/sample7.jpg"]



def analyze_image(image, model):
    """Run captioning and object detection on the image; return the caption and detected-objects string."""
    st.write("Analyzing . . .")
    caption = model.get_caption(image)
    image_with_boxes, detected_objects_str = model.detect_objects(image)
    return caption, detected_objects_str


def image_qa_app(kbvqa):
    """Streamlit UI for selecting or uploading images and asking questions about them."""
    if 'images_data' not in st.session_state:
        st.session_state['images_data'] = {}

    # Display sample images as clickable thumbnails
    st.write("Choose from sample images:")
    cols = st.columns(len(sample_images))
    for idx, sample_image_path in enumerate(sample_images):
        with cols[idx]:
            image = Image.open(sample_image_path)
            st.image(image, use_column_width=True)
            if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx}'):
                process_new_image(sample_image_path, image, kbvqa)

    # Image uploader
    uploaded_image = st.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        process_new_image(uploaded_image.name, Image.open(uploaded_image), kbvqa)

    # Display and interact with each uploaded/selected image
    for image_key, image_data in st.session_state['images_data'].items():
        st.image(image_data['image'], caption=f'Uploaded Image: {image_key[-11:]}', use_column_width=True)

        if not image_data['analysis_done']:
            if st.button('Analyze Image', key=f'analyze_{image_key}'):
                caption, detected_objects_str = analyze_image(image_data['image'], kbvqa)
                image_data['caption'] = caption
                image_data['detected_objects_str'] = detected_objects_str
                image_data['analysis_done'] = True

        if image_data['analysis_done']:
            qa_history = image_data.get('qa_history', [])

            # Widgets created inside the loop need unique keys, one set per image
            question = st.text_input("Ask a question about this image:", key=f'question_{image_key}')
            if st.button('Get Answer', key=f'answer_{image_key}'):
                # Only answer questions that have not been asked for this image yet
                if question not in [q for q, _ in qa_history]:
                    answer = answer_question(image_data['caption'], image_data['detected_objects_str'], question, kbvqa)
                    qa_history.append((question, answer))
                    image_data['qa_history'] = qa_history
                else:
                    st.info("This question has already been asked.")

            # Display Q&A history for this image
            for q, a in qa_history:
                st.text(f"Q: {q}\nA: {a}\n")

def process_new_image(image_key, image, kbvqa):
    """Process a new image and update the session state."""
    if image_key not in st.session_state['images_data']:
        st.session_state['images_data'][image_key] = {
            'image': image,
            'caption': '',
            'detected_objects_str': '',
            'qa_history': [],
            'analysis_done': False
        }

def run_inference():
    st.title("Run Inference")

    method = st.selectbox("Choose a method:", ["Fine-Tuned Model", "In-Context Learning (n-shots)"], index=0)
    detection_model = st.selectbox("Choose a model for object detection:", ["yolov5", "detic"], index=0)
    confidence_level = st.slider("Select minimum detection confidence level", min_value=0.1, max_value=0.9, value=0.2 if detection_model == "yolov5" else 0.4, step=0.1)

    # Check for changes in model or confidence level
    model_changed = (st.session_state.get('detection_model') != detection_model)
    confidence_changed = (st.session_state.get('confidence_level') != confidence_level)

    if model_changed or confidence_changed:
        st.session_state['detection_model'] = detection_model
        st.session_state['confidence_level'] = confidence_level
        st.warning("Detection model or confidence level changed. Please reload the model, this will take few seconds :)")


    
    # Initialize session state for the model

    if method == "Fine-Tuned Model":
        if 'kbvqa' not in st.session_state:
            st.session_state['kbvqa'] = None
    
        # Button to load KBVQA models



        if st.button('Load Model'):
            if st.session_state.get('kbvqa') and not model_changed and not confidence_changed:
                st.write("Model already loaded.")
            else:
                st.text("Loading the model will take no more than a few minutes . .")
                st.session_state['kbvqa'] = prepare_kbvqa_model(detection_model)
                st.session_state['kbvqa'].detection_confidence = confidence_level
                st.success("Model loaded with updated settings.")

        if st.session_state.get('kbvqa'):
            st.write("Model is ready for inference.")
            image_qa_app(st.session_state['kbvqa'])

    else:
        st.write('Model is not ready for inference yet')
        # here goes the code for n-shot learning; a hypothetical sketch follows below
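        # A minimal sketch of what this branch could look like, kept commented out
        # because it is an assumption rather than the author's implementation.
        # It assumes prepare_kbvqa_model() and the model's generate_answer() accept
        # the same arguments as in the fine-tuned path, with n exemplar Q&A pairs
        # prepended to the prompt; the widget names below are illustrative only.
        #
        # n_shots = st.slider("Number of in-context exemplars (n)", 1, 8, 3)
        # if st.button('Load Model', key='load_icl_model'):
        #     st.session_state['kbvqa'] = prepare_kbvqa_model(detection_model)
        #     st.session_state['kbvqa'].detection_confidence = confidence_level
        # if st.session_state.get('kbvqa'):
        #     image_qa_app(st.session_state['kbvqa'])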

            
# Main function
def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Finetuning and Evaluation Results", "Run Inference", "Dissertation Report"])
    st.sidebar.write("More Pages will follow .. ")

    if selection == "Home":
        st.title("MultiModal Learning for Knowledg-Based Visual Question Answering")
        st.write("Home page content goes here...")
        
    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the link below to view the PDF.")
        # Example to display a link to a PDF
        st.download_button(
            label="Download PDF",
            data=open("Files/Dissertation Report.pdf", "rb"),
            file_name="example.pdf",
            mime="application/octet-stream"
        )

        
    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a Place Holder until the contents are uploaded.")

        
    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a Place Holder until the contents are uploaded.")

    elif selection == "Finetuning and Evaluation Results":
        st.title("Finetuning and Evaluation Results")
        st.write("This is a Place Holder until the contents are uploaded.")


    elif selection == "Run Inference":
        run_inference()

    elif selection == "More Pages will follow .. ":
        st.title("Staye Tuned")
        st.write("This is a Place Holder until the contents are uploaded.")
            


if __name__ == "__main__":
    main()
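
# To launch the app locally, run it through the Streamlit CLI, e.g.
# (the filename "app.py" is an assumption about how this file is saved):
#   streamlit run app.py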