# app.py
import torch
import gradio as gr
import os
import requests
import argparse
from libra.eval import libra_eval
from libra.eval.run_libra import load_model
DEFAULT_MODEL_PATH = "X-iZhang/libra-v1.0-7b"

def get_model_short_name(model_path: str) -> str:
    """
    Extract the part of the model path after the last '/' to use as the
    display name in the dropdown menu.
    For example: "X-iZhang/libra-v1.0-7b" -> "libra-v1.0-7b"
    """
    return model_path.rstrip("/").split("/")[-1]

# Global cache (it could also be defined inside main; kept at module level here).
loaded_models = {}  # {model_key: reuse_model_object}

def generate_radiology_description(
    selected_model_name: str,
    current_img_data,
    prior_img_data,
    use_no_prior: bool,
    prompt: str,
    temperature: float,
    top_p: float,
    num_beams: int,
    max_new_tokens: int,
    model_paths_dict: dict
) -> str:
    """
    Run radiology report inference:
    1) Map the model name selected in the dropdown to the actual model_path.
    2) Make sure the user has provided the Current & Prior images.
    3) Call libra_eval.
    """
    real_model_path = model_paths_dict[selected_model_name]

    # The Current Image is mandatory; report an error if it is missing.
    if not current_img_data:
        return "Error: Please select or upload the Current Image."

    # If 'Without Prior Image' is checked, reuse the current image as the prior image.
    if use_no_prior:
        prior_img_data = current_img_data
    else:
        # Otherwise a prior image is required.
        if not prior_img_data:
            return "Error: Please select or upload the Prior Image, or check 'Without Prior Image'."

    # Reuse the model if it has already been loaded.
    if selected_model_name in loaded_models:
        reuse_model = loaded_models[selected_model_name]
    else:
        reuse_model = load_model(real_model_path)
        # Cache it for subsequent requests.
        loaded_models[selected_model_name] = reuse_model

    try:
        output = libra_eval(
            libra_model=reuse_model,
            image_file=[current_img_data, prior_img_data],
            query=prompt,
            temperature=temperature,
            top_p=top_p,
            num_beams=num_beams,
            length_penalty=1.0,
            num_return_sequences=1,
            conv_mode="libra_v1",
            max_new_tokens=max_new_tokens
        )
        return output
    except Exception as e:
        return f"An error occurred during model inference: {str(e)}"

def main():
    # ========== Directory of the current script (app.py) ==========
    cur_dir = os.path.abspath(os.path.dirname(__file__))

    # ========== Absolute paths to the local example images ==========
    # The example images live in the "examples" folder next to app.py.
    example_curent_path = os.path.join(cur_dir, "examples", "curent.jpg")
    example_curent_path = os.path.abspath(example_curent_path)
    example_prior_path = os.path.join(cur_dir, "examples", "prior.jpg")
    example_prior_path = os.path.abspath(example_prior_path)

    # Gradio Examples expects each example for a single gr.Image to be written as ["local file path"].
    IMAGE_EXAMPLES = [
        [example_curent_path],
        [example_prior_path]
    ]

    # ========== Command-line parsing (optional) ==========
    parser = argparse.ArgumentParser(description="Demo for Radiology Image Description Generator (Local Examples)")
    parser.add_argument(
        "--model-path",
        type=str,
        default=DEFAULT_MODEL_PATH,
        help="User-specified model path. If not provided, only the default model is shown."
    )
    args = parser.parse_args()
    cmd_model_path = args.model_path

    # ========== Set up the multi-model dropdown ==========
    model_paths_dict = {}
    user_key = get_model_short_name(cmd_model_path)
    model_paths_dict[user_key] = cmd_model_path

    # If the user-provided model differs from the default model, also add the default model as an option.
    if cmd_model_path != DEFAULT_MODEL_PATH:
        default_key = get_model_short_name(DEFAULT_MODEL_PATH)
        model_paths_dict[default_key] = DEFAULT_MODEL_PATH

    # (Optional) To preload the model here and avoid repeated loading:
    # reuse_model = load_model(cmd_model_path)
    # and then adapt generate_radiology_description to take reuse_model directly.
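
    # A minimal preloading sketch (an assumption, not part of the original flow): warm the
    # loaded_models cache defined above so generate_radiology_description stays unchanged.
    # Left commented out so the demo can start without immediately loading the weights:
    # loaded_models[user_key] = load_model(cmd_model_path)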

    # ========== Build the Gradio interface ==========
    with gr.Blocks(title="Libra: Radiology Analysis with Direct URL Examples") as demo:
        gr.Markdown("""
## 🩻 Libra: Leveraging Temporal Images for Biomedical Radiology Analysis
[Project Page](https://x-izhang.github.io/Libra_v1.0/) | [Paper](https://arxiv.org/abs/2411.19378) | [Code](https://github.com/X-iZhang/Libra) | [Model](https://huggingface.co/X-iZhang/libra-v1.0-7b)

**Requires a GPU to run effectively!**
        """)

        # Model selection dropdown
        model_dropdown = gr.Dropdown(
            label="Select Model",
            choices=list(model_paths_dict.keys()),
            value=user_key,
            interactive=True
        )

        # Clinical prompt
        prompt_input = gr.Textbox(
            label="Clinical Prompt",
            value="Provide a detailed description of the findings in the radiology image.",
            lines=2,
            info=(
                "If clinical instructions are available, include them after the default prompt. "
                "For example: “Provide a detailed description of the findings in the radiology image. "
                "Following clinical context: Indication: chest pain, History: ...”"
            )
        )
# Current & Prior 画像
with gr.Row():
with gr.Column():
gr.Markdown("### Current Image")
current_img = gr.Image(
label="Drop Or Upload Current Image",
type="filepath",
interactive=True
)
gr.Examples(
examples=IMAGE_EXAMPLES,
inputs=current_img,
label="Example Current Images"
)
with gr.Column():
gr.Markdown("### Prior Image")
prior_img = gr.Image(
label="Drop Or Upload Prior Image",
type="filepath",
interactive=True
)
# 新增一个复选框,勾选后表示「Without Prior Image」
with gr.Row():
gr.Examples(
examples=IMAGE_EXAMPLES,
inputs=prior_img,
label="Example Prior Images"
)
without_prior_checkbox = gr.Checkbox(
label="Without Prior Image",
value=False,
info="If checked, the current image will be used as the dummy prior image in the Libra model."
)

        with gr.Accordion("Parameters Settings", open=False):
            temperature_slider = gr.Slider(
                label="Temperature",
                minimum=0.1, maximum=1.0, step=0.1, value=0.9
            )
            top_p_slider = gr.Slider(
                label="Top P",
                minimum=0.1, maximum=1.0, step=0.1, value=0.8
            )
            num_beams_slider = gr.Slider(
                label="Number of Beams",
                minimum=1, maximum=20, step=1, value=1
            )
            max_tokens_slider = gr.Slider(
                label="Max output tokens",
                minimum=10, maximum=4096, step=10, value=128
            )

        output_text = gr.Textbox(
            label="Generated Findings Section",
            lines=5
        )

        generate_button = gr.Button("Generate Findings Description")

        generate_button.click(
            fn=lambda model_name, c_img, p_img, no_prior, prompt, temp, top_p, beams, tokens: generate_radiology_description(
                model_name,
                c_img,
                p_img,
                no_prior,
                prompt,
                temp,
                top_p,
                beams,
                tokens,
                model_paths_dict
            ),
            inputs=[
                model_dropdown,          # model_name
                current_img,             # c_img
                prior_img,               # p_img
                without_prior_checkbox,  # no_prior
                prompt_input,            # prompt
                temperature_slider,      # temp
                top_p_slider,            # top_p
                num_beams_slider,        # beams
                max_tokens_slider        # tokens
            ],
            outputs=output_text
        )

        # Terms of use shown at the bottom of the interface
        gr.Markdown("""
### Terms of Use
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA.
By accessing or using this demo, you acknowledge and agree to the following:
- **Research & Non-Commercial Purposes**: This demo is provided solely for research and demonstration. It must not be used for commercial activities or profit-driven endeavors.
- **Not Medical Advice**: All generated content is experimental and must not replace professional medical judgment.
- **Content Moderation**: While we apply basic safety checks, the system may still produce inaccurate or offensive outputs.
- **Responsible Use**: Do not use this demo for any illegal, harmful, hateful, violent, or sexual purposes.

By continuing to use this service, you confirm your acceptance of these terms. If you do not agree, please discontinue use immediately.
        """)

    demo.launch(share=True)
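
# Example invocations (illustrative; assumes the dependencies from
# https://github.com/X-iZhang/Libra are installed):
#   python app.py                                     # serves the default model X-iZhang/libra-v1.0-7b
#   python app.py --model-path /path/to/local/model   # hypothetical local checkpoint path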

if __name__ == "__main__":
    main()