Update app.py
Browse files
app.py
CHANGED
@@ -2,38 +2,38 @@ import gradio as gr
|
|
2 |
import openai
|
3 |
from openai import OpenAI
|
4 |
import os
|
5 |
-
import spaces
|
6 |
import base64
|
7 |
|
8 |
# Set API key and organization ID from environment variables
|
9 |
api_key = os.environ.get("OPENAI_API_KEY")
|
10 |
base_url = os.environ.get("OPENAI_API_BASE")
|
11 |
client = OpenAI(api_key=api_key, base_url=base_url)
|
|
|
12 |
|
13 |
# Define the model to be used
|
14 |
MODEL = "gpt-4o"
|
15 |
|
16 |
-
def process_text(text_input):
|
17 |
if text_input:
|
18 |
completion = client.chat.completions.create(
|
19 |
model=MODEL,
|
20 |
messages=[
|
21 |
-
{"role": "system", "content": "You are a helpful
|
22 |
{"role": "user", "content": f"Hello! Could you solve {text_input}?"}
|
23 |
]
|
24 |
)
|
25 |
return "Assistant: " + completion.choices[0].message.content
|
26 |
|
27 |
-
def process_image(image_input):
|
28 |
if image_input is not None:
|
29 |
with open(image_input.name, "rb") as f:
|
30 |
base64_image = base64.b64encode(f.read()).decode("utf-8")
|
31 |
response = client.chat.completions.create(
|
32 |
model=MODEL,
|
33 |
messages=[
|
34 |
-
{"role": "system", "content": "You are a helpful
|
35 |
{"role": "user", "content": [
|
36 |
-
{"type": "text", "text": "Help me understand what
|
37 |
{"type": "image_url", "image_url": {
|
38 |
"url": f"data:image/png;base64,{base64_image}"}
|
39 |
}
|
@@ -43,11 +43,30 @@ def process_image(image_input):
|
|
43 |
)
|
44 |
return response.choices[0].message.content
|
45 |
|
46 |
-
def main(text_input="", image_input=None):
|
47 |
if text_input and image_input is None:
|
48 |
-
return process_text(text_input)
|
49 |
elif image_input is not None:
|
50 |
-
return process_image(image_input)
|
51 |
|
52 |
-
|
53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import openai
from openai import OpenAI
import os
import base64

# Set API key and organization ID from environment variables
api_key = os.environ.get("OPENAI_API_KEY")
base_url = os.environ.get("OPENAI_API_BASE")
client = OpenAI(api_key=api_key, base_url=base_url)

# Load the shared system prompt from disk.
# BUG FIX: the original called an undefined bare `read('system_prompt.txt')`,
# which raises NameError at import time; use open() + .read() instead.
with open("system_prompt.txt", encoding="utf-8") as _prompt_file:
    sys_prompt = _prompt_file.read()

# Define the model to be used
MODEL = "gpt-4o"
15 |
|
def process_text(text_input, unit):
    """Ask the chat model to answer *text_input*, role-played as a doctor.

    Args:
        text_input: The user's question. Falsy input short-circuits to None,
            matching the original implicit fall-through.
        unit: Hospital department name interpolated into the system prompt.

    Returns:
        The assistant reply prefixed with "Assistant: ", or None when
        text_input is empty.
    """
    if not text_input:
        return None

    system_message = {
        "role": "system",
        "content": f" You are a helpful {unit} doctor." + sys_prompt,
    }
    user_message = {
        "role": "user",
        "content": f"Hello! Could you solve {text_input}?",
    }
    completion = client.chat.completions.create(
        model=MODEL,
        messages=[system_message, user_message],
    )
    return "Assistant: " + completion.choices[0].message.content
|
26 |
|
27 |
+
def process_image(image_input,unit):
|
28 |
if image_input is not None:
|
29 |
with open(image_input.name, "rb") as f:
|
30 |
base64_image = base64.b64encode(f.read()).decode("utf-8")
|
31 |
response = client.chat.completions.create(
|
32 |
model=MODEL,
|
33 |
messages=[
|
34 |
+
{"role": "system", "content": f" You are a helpful {unit} doctor." + sys_prompt},
|
35 |
{"role": "user", "content": [
|
36 |
+
{"type": "text", "text": "Help me understand what this image"},
|
37 |
{"type": "image_url", "image_url": {
|
38 |
"url": f"data:image/png;base64,{base64_image}"}
|
39 |
}
|
|
|
43 |
)
|
44 |
return response.choices[0].message.content
|
45 |
|
def main(text_input="", image_input=None, unit=""):
    """Dispatch a request to the image or text handler.

    An uploaded image takes priority; otherwise non-empty text is handled;
    with neither, returns None (same as the original fall-through).

    Args:
        text_input: Free-text question, may be empty.
        image_input: Uploaded image (or None when absent).
        unit: Department name forwarded to the handlers.
    """
    if image_input is not None:
        # Equivalent to the original elif branch: whenever an image exists,
        # the text-only condition (`image_input is None`) was False anyway.
        return process_image(image_input, unit)
    if text_input:
        return process_text(text_input, unit)
    return None
|
51 |
|
# --- Gradio UI -------------------------------------------------------------
# NOTE(review): nesting reconstructed from a flattened paste — the rows are
# assumed to sit at Blocks level, below the Accordion; confirm against the
# deployed layout.
with gr.Blocks() as iface:
    with gr.Accordion(""):
        gr.Markdown("""医学报告助手 GPT-4o""")
        unit = gr.Dropdown(
            label="🩺科室",
            value='内科',
            elem_id="units",
            choices=["内科", "外科", "妇产科", "男科", "儿科",
                     "五官科", "肿瘤科", "皮肤性病科", "中医科", "传染科", "精神心理科",
                     "整形美容科", "营养科", "生殖中心", "麻醉医学科", "医学影像科",
                     "急诊科", "检验科"],
        )
    with gr.Row():
        output_box = gr.Textbox(label="Output")  # Create an output textbox
    with gr.Row():
        image_btn = gr.Image(type="filepath", label="上传图片")  # Create an image upload button
        text_input = gr.Textbox(label="输入")  # Create a text input box
    with gr.Row():
        submit_btn = gr.Button("🚀 确认")  # Create a submit button
        clear_btn = gr.ClearButton(output_box, value="🗑️ 清空")  # Create a clear button

    # Set up the event listeners
    submit_btn.click(main, inputs=[text_input, image_btn, unit], outputs=output_box)

iface.launch()  # Launch the Gradio interface
|