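"""Gradio demo for MedViT: generate draft radiology reports for chest X-rays
and compare two generated texts with an inline diff."""
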
import gradio as gr
import difflib

# Local wrapper modules for the two captioning models
import clipGPT
import vitGPT


def compare_and_highlight(text1, text2):
    """Return an HTML diff of text1 and text2: matches bold, insertions and deletions marked."""
    matcher = difflib.SequenceMatcher(None, text1, text2)
    output = ''
    for op, a1, a2, b1, b2 in matcher.get_opcodes():
        if op == 'equal':
            output += f"<b>{text1[a1:a2]}</b>"  # Highlight matches in bold
        elif op == 'insert':
            output += f"<ins>{text2[b1:b2]}</ins>"
        elif op == 'delete':
            output += f"<del>{text1[a1:a2]}</del>"
        elif op == 'replace':
            # A replacement is a deletion from text1 followed by an insertion from text2
            output += f"<del>{text1[a1:a2]}</del> <ins>{text2[b1:b2]}</ins>"
    return output


# Caption generation wrappers
def generate_caption_clipgpt(image):
    return clipGPT.generate_caption_clipgpt(image)


def generate_caption_vitgpt(image):
    return vitGPT.generate_caption(image)



with gr.Blocks() as demo:

    gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–</h1>")
    gr.HTML("<p style='text-align: center;'>Upload a chest X-ray and select a model below to generate a caption.</p>")

    with gr.Row():
        sample_images = [
            "CXR191_IM-0591-1001.png",
            "CXR192_IM-0598-1001.png",
            "CXR193_IM-0601-1001.png",
            "CXR194_IM-0609-1001.png",
            "CXR195_IM-0618-1001.png",
        ]

        image = gr.Image(label="Upload Chest X-ray")
        sample_images_gallery = gr.Gallery(value=sample_images, label="Sample Images")
        
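    # A minimal sketch (not wired up in the original code): load a clicked sample
    # into the image input via the gallery's select event. Assumes a Gradio version
    # whose gr.SelectData carries the selected index for a Gallery.
    def load_sample(evt: gr.SelectData):
        return sample_images[evt.index]

    sample_images_gallery.select(load_sample, None, image)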
    
    with gr.Row():
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
        generate_button = gr.Button("Generate Caption") 
    
    caption = gr.Textbox(label="Generated Caption") 

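    # Dispatch to the generator that matches the selected model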
    def predict(img, model_name):
        if model_name == "CLIP-GPT2":
            return generate_caption_clipgpt(img)
        elif model_name == "ViT-GPT2":
            return generate_caption_vitgpt(img)
        else:
            return "Caption generation for this model is not yet implemented."     

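    # Diff two texts (e.g., captions from different models) side by side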
    with gr.Row():
        text1 = gr.Textbox(label="Text 1")
        text2 = gr.Textbox(label="Text 2")
        compare_button = gr.Button("Compare Texts")
    with gr.Row():
        comparison_result = gr.HTML(label="Comparison Result")  # renders the <b>/<ins>/<del> diff markup

    # Event handlers
    # Pass the textboxes as inputs so Gradio supplies their current values;
    # a zero-argument lambda reading .value would only see the initial (empty) values.
    compare_button.click(compare_and_highlight, [text1, text2], comparison_result)


    generate_button.click(predict, [image, model_choice], caption)  # Trigger prediction on button click
    # Sample-image clicks are handled by the gallery select event defined above


demo.launch()