import gradio as gr
from openai import OpenAI
import os
import io
import base64
# Read the OpenAI API key (and optionally a custom base URL) from environment variables
api_key = os.environ.get("OPENAI_API_KEY")
#base_url = os.environ.get("OPENAI_API_BASE")
client = OpenAI(api_key=api_key)
# Define the model to be used
MODEL = os.environ.get("MODEL")
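# Both environment variables above must be set before the app starts. The values
# below are purely illustrative (any chat- and vision-capable OpenAI model works):
#   export OPENAI_API_KEY="sk-..."
#   export MODEL="gpt-4o"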
def read(filename):
    # Read a text file as UTF-8 (the system prompt may contain non-ASCII text)
    with open(filename, encoding="utf-8") as f:
        data = f.read()
    return data
SYS_PROMPT = read('system_prompt.txt')
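# DESCRIPTION below is the Chinese UI blurb rendered at the top of the page; roughly:
# "Consultation Assistant - an AI tool that helps you analyze symptoms and test reports.
#  Pick the department you want to consult and enter your symptoms or check-up data in
#  the text box; you can also upload an image of a test report.
#  Note: generated content may be inaccurate and has no real reference value;
#  consult a professional doctor if needed."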
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">诊疗助手</h1>
<p>一个帮助您分析症状和检验报告的AI工具。</p>
<p>🔎 选择您需要咨询的科室,在输入框中输入症状描述或者体检信息等;您也可以在图片框中上传检测报告图。</p>
<p>🦕 请注意生成信息可能不准确,且不具备任何实际参考价值,如有需要请联系专业医生。</p>
</div>
'''
css = """
h1 {
text-align: center;
display: block;
}
footer {
display:none !important
}
"""
LICENSE = f'采用 {MODEL} 模型'  # "Powered by the {MODEL} model"
def process_text(text_input, unit):
    if text_input:
        completion = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor. " + SYS_PROMPT},
                {"role": "user", "content": f"Hello! Could you help me analyze the following: {text_input}?"}
            ]
        )
        return completion.choices[0].message.content
    return ""
def encode_image_to_base64(image_input):
    # JPEG cannot store an alpha channel, so convert e.g. RGBA screenshots first
    if image_input.mode != "RGB":
        image_input = image_input.convert("RGB")
    buffered = io.BytesIO()
    image_input.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
def process_image(image_input, unit):
    if image_input is not None:
        base64_image = encode_image_to_base64(image_input)
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": f"You are an experienced {unit} doctor. " + SYS_PROMPT},
                {"role": "user", "content": [
                    {"type": "text", "text": "Help me understand what is in this picture and analyze it."},
                    {"type": "image_url",
                     "image_url": {
                         "url": f"data:image/jpeg;base64,{base64_image}",
                         "detail": "low"}
                     }
                ]}
            ],
            temperature=0.0,
            max_tokens=1024,
        )
        return response.choices[0].message.content
    return ""
def main(text_input="", image_input=None, unit=""):
    # Prefer the uploaded image if there is one, otherwise fall back to the text input
    if image_input is not None:
        return process_image(image_input, unit)
    elif text_input:
        return process_text(text_input, unit)
    return ""
with gr.Blocks(theme='shivi/calm_seafoam') as iface:
    with gr.Accordion(""):
        gr.Markdown(DESCRIPTION)
    unit = gr.Dropdown(label="🩺科室", value='中医科', elem_id="units",
                       choices=["中医科", "内科", "外科", "妇产科", "儿科",
                                "五官科", "男科", "皮肤性病科", "传染科", "精神心理科",
                                "整形美容科", "营养科", "生殖中心", "麻醉医学科", "医学影像科",
                                "骨科", "肿瘤科", "急诊科", "检验科"])  # Department selector
    with gr.Row():
        output_box = gr.Markdown(label="分析")  # Markdown area for the model's analysis
    with gr.Row():
        image_input = gr.Image(type="pil", label="上传图片")  # Image upload component
        text_input = gr.Textbox(label="输入")  # Text input box
    with gr.Row():
        submit_btn = gr.Button("🚀 确认")  # Submit button
        clear_btn = gr.ClearButton(output_box, value="🗑️ 清空")  # Clears the output area
    # Set up the event listeners
    submit_btn.click(main, inputs=[text_input, image_input, unit], outputs=output_box)
    gr.Markdown(LICENSE)
#gr.close_all()
iface.queue().launch(show_api=False)  # Launch the Gradio interface
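# Note: queue().launch() serves the app on a local URL by default; if a temporary
# public link is wanted, launch(share=True) is the standard Gradio option for that.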