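"""Medster: a Gradio diagnostic assistant that sends symptom text or test-report
images to an OpenAI chat model and displays the model's analysis."""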
import gradio as gr
from openai import OpenAI
import os
import io
import base64

# Set the API key (read from the environment rather than hard-coding it here)
#base_url = os.environ.get("OPENAI_API_BASE")
api_key = os.environ.get("OPENAI_API_KEY", "")
client = OpenAI(api_key=api_key)

# Define the model to be used (e.g. "gpt-4o"); default to an empty string so the banner below still renders
MODEL = os.environ.get("MODEL", "")

def read(filename):
    # Return the contents of a text file as a single string
    with open(filename) as f:
        data = f.read()
    return data

SYS_PROMPT = read('system_prompt.txt')


DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Medster - Diagnostic Assistant</h1>
<p>An AI tool that helps you analyze symptoms and test reports.</p>
<p>🔎 Select the department you want to consult, then enter a symptom description or physical-examination findings in the text box; you can also upload an image of a test report.</p>
<p>🦕 Please note that the generated information may be inaccurate and has no real diagnostic value. Consult a professional doctor if necessary.</p>
</div>
'''


css = """
h1 {
    text-align: center;
    display: block;
}
footer {
    display:none !important
}
"""


LICENSE = 'MODEL: ' + MODEL + ' LOADED'

def process_text(text_input, unit):
    if text_input:
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f" You are a experienced {unit} doctor." + SYS_PROMPT},
                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
            ]
        )
        return completion.choices[0].message.content
    return ""

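# The chat completions vision input expects a base64-encoded data URL, so the PIL
# image is serialized to JPEG in memory and base64-encoded before sending.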
def encode_image_to_base64(image_input):
    buffered = io.BytesIO()
    # Convert to RGB first so RGBA/PNG uploads can still be encoded as JPEG
    image_input.convert("RGB").save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str

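# Send the encoded image plus a short text instruction as a multimodal user message.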
def process_image(image_input, unit):
    if image_input is not None:
        #with open(image_input.name, "rb") as f:
        #    base64_image = base64.b64encode(f.read()).decode("utf-8")
        base64_image = encode_image_to_base64(image_input)
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f" You are a experienced {unit} doctor." + SYS_PROMPT},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and analysis."},
                    {"type": "image_url", 
                     "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}",
                        "detail":"low"}
                    }
                ]}
            ],
            temperature=0.0,
            max_tokens=1024,
        )
        return response.choices[0].message.content


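# Dispatcher wired to the Send button: an uploaded image takes priority, otherwise the text is analyzed.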
def main(api_key_input="", text_input="", image_input=None, unit=""):
    global client
    if api_key_input:
        # A key entered in the UI overrides the one taken from the environment
        client = OpenAI(api_key=api_key_input)
    if text_input and image_input is None:
        return process_text(text_input, unit)
    elif image_input is not None:
        return process_image(image_input, unit)
    return ""

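# Build the Gradio UI: description, department dropdown, image/text inputs, API-key box, and a Markdown output area.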
with gr.Blocks(theme='shivi/calm_seafoam', css=css, title="Medster - Diagnostic Assistant") as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
        unit = gr.Dropdown(label="🩺Department", value='Traditional Chinese Medicine', elem_id="units",
                           choices=["Traditional Chinese Medicine", "Internal Medicine", "Surgery",
                                    "Obstetrics and Gynecology", "Pediatrics", "Orthodontics", "Andrology",
                                    "Dermatology and Venereology", "Infectious Diseases", "Psychiatry",
                                    "Plastic Surgery Department", "Nutrition Department", "Reproductive Center",
                                    "Anesthesiology Department", "Medical Imaging Department", "Orthopedics",
                                    "Oncology", "Emergency Department", "Laboratory Department"])
    with gr.Row():
        output_box = gr.Markdown(label="Diagnosis")  # Markdown output area for the diagnosis
    with gr.Row():
        api_key_box = gr.Textbox(label="OpenAI API Key")  # Optional user-supplied API key
    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Image")  # Image upload component for test reports
        text_input = gr.Textbox(label="Symptoms")  # Text input for the symptom description
    with gr.Row():
        submit_btn = gr.Button("🚀 Send")  # Create a submit button
        clear_btn = gr.ClearButton(output_box, value="🗑️ Clear") # Create a clear button

    # Set up the event listeners
    submit_btn.click(main, inputs=[api_key_box, text_input, image_input, unit], outputs=output_box)

    gr.Markdown(LICENSE)
    
#gr.close_all()

iface.queue().launch(show_api=False)  # Launch the Gradio interface