import gradio as gr
import openai
from openai import OpenAI
import google.generativeai as genai
import os
import io
import base64
# The API key is supplied by the user at runtime; no key is read from the environment.
#base_url = os.environ.get("OPENAI_API_BASE")
# The model to use is chosen in endpoints() below, based on the key format.
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Medster - Medical Diagnostic Assistant</h1>
<p>An AI tool that helps you analyze symptoms and test reports. </p>
<p>🔎 Select the department you need to consult, and enter the symptom description or physical examination information in the input box; you can also upload the test report image in the picture box. </p>
<p>🦕 Please note that the generated information may be inaccurate and does not have any actual reference value. Please contact a professional doctor if necessary. </p>
</div>
'''
css = """
h1 {
text-align: center;
display: block;
}
footer {
display:none !important
}
"""
LICENSE = '[Medster](https://huggingface.co/spaces/vilarin/Medster)'
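# Load the system prompt from a local text file.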
def read(filename):
    with open(filename) as f:
        data = f.read()
    return data
SYS_PROMPT = read('system_prompt.txt')
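# Infer the provider and model from the API key format: OpenAI keys start with "sk-",
# anything else is treated as a Google Gemini key.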
def endpoints(api_key):
    if api_key is not None:
        if api_key.startswith('sk-'):
            model_name = "gpt-4o"
            endpoint = 'OPENAI'
            return model_name, endpoint
        else:
            model_name = "models/gemini-1.5-pro-latest"
            endpoint = 'GOOGLE'
            return model_name, endpoint
    # Fall back gracefully when no key is provided so callers can still unpack the result.
    return None, None
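# Handle text-only queries: send the symptom description to the selected provider
# with a department-specific system prompt and return the model's reply.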
def process_text(api_key, text_input, unit):
    model_name, endpoint = endpoints(api_key)
    if text_input and endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        completion = client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor. " + SYS_PROMPT},
                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
            ]
        )
        return completion.choices[0].message.content
    elif text_input and endpoint == "GOOGLE":
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=model_name)
        prompt = f"You are an experienced {unit} doctor. " + SYS_PROMPT + f" Could you solve {text_input}?"
        response = model.generate_content(prompt)
        return response.text
    return ""
def encode_image_to_base64(image_input):
    # Convert to RGB first so PNG uploads with an alpha channel can still be saved as JPEG.
    buffered = io.BytesIO()
    image_input.convert("RGB").save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
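# Handle image queries: pass the uploaded report image, together with the
# department-specific prompt, to the selected provider and return its analysis.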
def process_image(api_key, image_input, unit):
    model_name, endpoint = endpoints(api_key)
    if image_input is not None and endpoint == 'OPENAI':
        client = OpenAI(api_key=api_key)
        #with open(image_input.name, "rb") as f:
        #    base64_image = base64.b64encode(f.read()).decode("utf-8")
        base64_image = encode_image_to_base64(image_input)
        response = client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor. " + SYS_PROMPT},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and analyze it."},
                    {"type": "image_url",
                     "image_url": {
                         "url": f"data:image/jpeg;base64,{base64_image}",
                         "detail": "low"}
                     }
                ]}
            ],
            temperature=0.0,
            max_tokens=1024,
        )
        return response.choices[0].message.content
    elif image_input is not None and endpoint == "GOOGLE":
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel(model_name=model_name)
        prompt = f"You are an experienced {unit} doctor. " + SYS_PROMPT + " Help me understand what is in this picture and analyze it."
        response = model.generate_content([prompt, image_input], request_options={"timeout": 60})
        return response.text
    return ""
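# Dispatch to the text or image handler depending on which inputs were provided.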
def main(unit="", api_key="", text_input="", image_input=None):
    if text_input and image_input is None:
        return process_text(api_key, text_input, unit)
    elif image_input is not None:
        return process_image(api_key, image_input, unit)
    return ""
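# Build the Gradio UI: department selector, API key field, text/image inputs,
# and a Markdown output area wired to main() via the Send button.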
with gr.Blocks(theme='shivi/calm_seafoam', css=css, title="Medster - Medical Diagnostic Assistant") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    unit = gr.Dropdown(label="🩺Department", value='Traditional Medicine', elem_id="units",
                       choices=["Traditional Medicine", "Internal Medicine", "Surgery", "Obstetrics and Gynecology", "Pediatrics",
                                "Orthodontics", "Andrology", "Dermatology and Venereology", "Infectious Diseases", "Psychiatry",
                                "Plastic Surgery Department", "Nutrition Department", "Reproductive Center", "Anesthesiology Department", "Medical Imaging Department",
                                "Orthopedics", "Oncology", "Emergency Department", "Laboratory Department"])
    with gr.Row():
        output_box = gr.Markdown(label="Diagnosis")  # Markdown area for the generated analysis
    with gr.Row():
        api_key = gr.Textbox(label="API Key", type='password')  # OpenAI or Google API key, entered by the user
    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Image")  # Image upload for test reports
        text_input = gr.Textbox(label="Input Text")  # Text input for symptom descriptions
    with gr.Row():
        submit_btn = gr.Button("🚀 Send")  # Submit button
        clear_btn = gr.ClearButton(output_box, value="🗑️ Clear")  # Clears the output area
    # Set up the event listeners
    submit_btn.click(main, inputs=[unit, api_key, text_input, image_input], outputs=output_box)
    gr.Markdown(LICENSE)

#gr.close_all()
iface.queue().launch(show_api=False)  # Launch the Gradio interface