import os

import streamlit as st
import torch
from PIL import Image
from torchvision import models, transforms

# Set up the environment variable for the Groq API.
# Never commit real keys to source control; supply the key via the environment
# (or Streamlit secrets) and fall back to a placeholder here.
os.environ["GROQ_API_KEY"] = os.environ.get("GROQ_API_KEY", "YOUR_GROQ_API_KEY")


# Load a pretrained backbone for organ recognition.
# Note: the replacement final layer is randomly initialised here; for meaningful
# predictions it must be fine-tuned on labelled X-ray data.
@st.cache_resource
def load_organ_model():
    model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)  # pretrained ResNet18
    model.fc = torch.nn.Linear(model.fc.in_features, 4)  # modify head for 4 organ classes
    model.eval()
    return model


organ_model = load_organ_model()


# Image preprocessing: resize, convert to tensor, normalise with ImageNet statistics
def preprocess_image(image):
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    return transform(image).unsqueeze(0)  # add a batch dimension


# Organ recognition prediction
def predict_organ(image):
    classes = ["Lungs", "Heart", "Spine", "Other"]  # example organ classes
    with torch.no_grad():
        input_tensor = preprocess_image(image)
        output = organ_model(input_tensor)
    return classes[output.argmax(dim=1).item()]


# Streamlit app
st.title("X-ray Organ Recognition App")
st.sidebar.title("Navigation")
task = st.sidebar.radio("Select a task", ["Upload X-ray", "AI Insights"])

if task == "Upload X-ray":
    uploaded_file = st.file_uploader("Upload an X-ray image", type=["jpg", "png", "jpeg"])
    if uploaded_file:
        # Convert to RGB so grayscale X-rays match the model's 3-channel input
        image = Image.open(uploaded_file).convert("RGB")
        st.image(image, caption="Uploaded X-ray", use_column_width=True)

        # Predict the organ
        st.subheader("Step 1: Identify the Organ in the X-ray")
        organ = predict_organ(image)
        st.write(f"Predicted Organ: **{organ}**")

elif task == "AI Insights":
    st.subheader("Ask AI")
    user_input = st.text_area("Enter your query for AI insights")
    if user_input:
        st.write("AI insights will be generated here.")  # Placeholder for Groq-powered AI logic