import streamlit as st
import pickle
import pandas as pd
import torch
from PIL import Image
import numpy as np
from main import predict_caption, CLIPModel, get_text_embeddings
import openai
import base64
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import docx
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from io import BytesIO
import os

# Set up the OpenAI API key from the environment instead of hard-coding a secret
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Custom CSS for the page
st.markdown(
    """
    <style>
    body {
        background-color: transparent;
    }
    .container {
        display: flex;
        justify-content: center;
        align-items: center;
        background-color: rgba(255, 255, 255, 0.7);
        border-radius: 15px;
        padding: 20px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
device = torch.device("cpu")
testing_df = pd.read_csv("testing_df.csv")
model = CLIPModel().to(device)
model.load_state_dict(torch.load("weights.pt", map_location=torch.device('cpu')))
text_embeddings = torch.load('saved_text_embeddings.pt', map_location=device)
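# Assumed addition: switch the model to inference mode, since it is only used for
# prediction in this app.
model.eval()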

def show_predicted_caption(image):
    matches = predict_caption(
        image, model, text_embeddings, testing_df["caption"]
    )[0]
    return matches

def generate_radiology_report(prompt):
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=800,
        n=1,
        stop=None,
        temperature=0.9,
    )
    return response.choices[0].text.strip()

def chatbot_response(prompt):
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=500,
        n=1,
        stop=None,
        temperature=0.8,
    )
    return response.choices[0].text.strip()

def create_pdf(caption, buffer):
    c = canvas.Canvas(buffer, pagesize=letter)
    c.drawString(50, 750, caption)
    c.save()
    buffer.seek(0)
    return buffer
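
# NOTE (assumed helpers): save_as_docx and download_link are called below but are
# not defined in this file. The following are minimal sketches reconstructed from
# the python-docx, base64 and BytesIO imports above; replace them with the
# project's original implementations if those live elsewhere.
def save_as_docx(text, filename):
    # filename is accepted only to match the call site below; the document is
    # returned as in-memory bytes rather than written to disk.
    doc = docx.Document()
    paragraph = doc.add_paragraph(text)
    paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
    buffer = BytesIO()
    doc.save(buffer)
    buffer.seek(0)
    return buffer.getvalue()


def download_link(file_bytes, filename, link_text):
    # Build a base64 data-URI link that Streamlit can render via unsafe_allow_html
    b64 = base64.b64encode(file_bytes).decode()
    href = (
        "data:application/vnd.openxmlformats-officedocument."
        f"wordprocessingml.document;base64,{b64}"
    )
    return f'<a href="{href}" download="{filename}">{link_text}</a>'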
st.title("RadiXGPT: An Evolution of machine doctors towrads Radiology")
st.write("Upload Scan to get Radiological Report:")
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)
    st.write("")
    if st.button("Generate Caption"):
        with st.spinner("Generating caption..."):
            image_np = np.array(image)
            caption = show_predicted_caption(image_np)
        st.success(f"Caption: {caption}")
        # Generate the full radiology report from the predicted caption via OpenAI
        radiology_report = generate_radiology_report(
            f"Write a complete radiology report for this: {caption}"
        )
        container = st.container()
        with container:
            st.header("Radiology Report")
            st.write(radiology_report)
            st.markdown(
                download_link(
                    save_as_docx(radiology_report, "radiology_report.docx"),
                    "radiology_report.docx",
                    "Download Report as DOCX",
                ),
                unsafe_allow_html=True,
            )
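            # NOTE (assumed wiring): create_pdf is defined above but never used in
            # the original flow; this is a minimal sketch that exposes the report
            # as a PDF download alongside the DOCX link.
            pdf_buffer = create_pdf(radiology_report, BytesIO())
            st.download_button(
                label="Download Report as PDF",
                data=pdf_buffer,
                file_name="radiology_report.pdf",
                mime="application/pdf",
            )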
            # Chatbot: 1-to-1 consultation about the generated report
            st.header("1-to-1 Consultation")
            st.write("Ask any questions you have about the radiology report:")
            user_input = st.text_input("Enter your question:")
            if user_input:
                if user_input.lower() == "thank you":
                    st.write("You're welcome! If you have any more questions, feel free to ask.")
                else:
                    # Answer the question with OpenAI, grounded in the generated report
                    answer = chatbot_response(
                        f"Based on this radiology report:\n{radiology_report}\n\n"
                        f"Answer the patient's question: {user_input}"
                    )
                    st.write(answer)