import streamlit as st
import torch
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
from PIL import Image

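# Load the dermatology fine-tuned PaliGemma checkpoint and its processor from the Hugging Face Hub.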
model_id = "brucewayne0459/paligemma_derm"
processor = AutoProcessor.from_pretrained(model_id)
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
model.eval()

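# Run inference on GPU when available; otherwise fall back to CPU.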
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Logo (use the Hugging Face logo for now)
st.markdown(
    """
    <style>
    .huggingface-logo {
        display: flex;
        justify-content: center;
        margin-bottom: 20px;
    }
    .huggingface-logo img {
        width: 150px;
    }
    </style>
    <div class="huggingface-logo">
        <img src="https://huggingface.co/front/assets/huggingface_logo-noborder.svg" alt="Hugging Face Logo">
    </div>
    """,
    unsafe_allow_html=True,
)

st.title("VisionDerm")
st.write("Upload an image or use your camera to identify the skin condition.")

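# Two-column layout: image inputs on the left, preview and results on the right.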
col1, col2 = st.columns([3, 2])

with col1:
    # File uploader
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
    # Camera input
    camera_photo = st.camera_input("Take a photo")
    # Prompt sent to the model alongside the image
    prompt = "Identify the skin condition?"

# Choose input image
input_image = None
if camera_photo:
    input_image = Image.open(camera_photo)
elif uploaded_file:
    input_image = Image.open(uploaded_file)

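# Preview the selected image and run the model on it.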
with col2:
    if input_image:
        resized_image = input_image.resize((300, 300))
        st.image(resized_image, caption="Selected Image (300x300)", use_container_width=True)

        # Resize image for processing (512x512 pixels)
        max_size = (512, 512)
        processed_image = input_image.resize(max_size)

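        # Tokenize the prompt together with the image and generate a short prediction.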
        with st.spinner("Processing..."):
            try:
                inputs = processor(
                    text=prompt,
                    images=processed_image,
                    return_tensors="pt",
                    padding="longest"
                ).to(device)

                default_max_tokens = 50  # Set a default value for max tokens
                with torch.no_grad():
                    outputs = model.generate(**inputs, max_new_tokens=default_max_tokens)

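                # Decode the generated tokens and strip the echoed prompt from the output.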
                decoded_output = processor.decode(outputs[0], skip_special_tokens=True)
                if prompt in decoded_output:
                    decoded_output = decoded_output.replace(prompt, "").strip()

                decoded_output = decoded_output.title()

                st.success("Analysis Complete!")
                st.write("**Model Output:**", decoded_output)

            except Exception as e:
                st.error(f"Error: {str(e)}")

st.markdown("---")
st.info("""
### Team: Mahasigma Berprestasi
- **Muhammad Karov Ardava Barus** ; 103052300001
- **Akmal Yaasir Fauzaan** ; 103052300008
- **Farand Diy Dat Mahazalfaa** ; 103052300050
- **Hauzan Rafi Attallah** ; 103052330011
""")