import os
import streamlit as st
from transformers import pipeline
from PIL import Image
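
# Setup note (assumptions, not verified against a requirements file): running this
# app needs streamlit, transformers, a backend such as torch, and pillow installed,
# e.g. `pip install streamlit transformers torch pillow`. If this file is saved as
# app.py, launch it with `streamlit run app.py`.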

# Cache the pipeline so the model is not re-downloaded and re-initialised on every rerun.
@st.cache_resource
def load_pipeline():
    """Load the Hugging Face pipeline for image classification."""
    try:
        return pipeline("image-classification", model="dima806/pneumonia_chest_xray_image_detection")
    except Exception as e:
        st.error(f"Error loading pipeline: {e}")
        return None

def classify_image_with_pipeline(pipe, image):
    """Classify an image using the pipeline."""
    try:
        results = pipe(image)
        return results
    except Exception as e:
        st.error(f"Error classifying image: {e}")
        return None

# Streamlit App
st.title("Pneumonia Chest X-ray Image Detection")
st.markdown(
    """
    This app detects signs of pneumonia in chest X-ray images using a pre-trained Hugging Face model.
    """
)

# File uploader
uploaded_file = st.file_uploader("Upload a chest X-ray image", type=["jpg", "jpeg", "png"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Chest X-ray", use_column_width=True)

    # Load the model pipeline
    pipe = load_pipeline()

    if pipe:
        st.write("Classifying the image...")
        results = classify_image_with_pipeline(pipe, image)

        if results:
            st.write("### Classification Results:")
            for result in results:
                st.write(f"**Label:** {result['label']} | **Score:** {result['score']:.4f}")

# Optional: Add Groq API integration if applicable
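# Note: this block assumes the `groq` Python client is installed (`pip install groq`)
# and only runs when the GROQ_API_KEY environment variable is set.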
if os.getenv("GROQ_API_KEY"):
    from groq import Groq

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

    st.sidebar.markdown("### Groq API Integration")
    question = st.sidebar.text_input("Ask a question about pneumonia or X-ray diagnosis:")

    if question:
        try:
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "user",
                        "content": question,
                    }
                ],
                model="llama-3.3-70b-versatile",
            )

            st.sidebar.write("**Groq API Response:**")
            st.sidebar.write(chat_completion.choices[0].message.content)
        except Exception as e:
            st.sidebar.error(f"Error using Groq API: {e}")