Ali2206 committed on
Commit
0ec5bb2
·
verified ·
1 Parent(s): bb17715

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -122
app.py CHANGED
@@ -1,126 +1,10 @@
1
  import gradio as gr
2
import logging

# Delay heavy imports until later to avoid multiprocessing conflicts
# tx_app holds the singleton TxAgent; it stays None until load_model() runs,
# and respond() checks it to detect "model still loading".
tx_app = None  # Global agent instance

# Module-wide logging: INFO level, named after this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
 
9
 
10
# ========== Dummy Response (will be replaced by real agent later) ==========
def respond(message, chat_history, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round):
    """Stream a TxAgent reply for *message*, yielding updated chat history.

    Args mirror the Gradio inputs: the user text, current chat history,
    sampling temperature, per-step and total token limits, the multi-agent
    toggle, shared conversation state, and the maximum tool-call rounds.

    Yields:
        chat_history extended with the in-progress (user, assistant) exchange,
        one item per streamed chunk.
    """
    global tx_app
    # BUG FIX: this function contains `yield`, so it is a generator. The
    # original used `return chat_history + [...]` for the early exits; a
    # `return <value>` inside a generator only sets StopIteration.value and
    # Gradio never sees it — the warning messages were silently dropped.
    # Early exits must `yield` their message, then `return` bare.
    if tx_app is None:
        yield chat_history + [("", "⚠️ Model is still loading. Please wait a few seconds and try again.")]
        return

    try:
        if not isinstance(message, str) or len(message.strip()) < 10:
            yield chat_history + [("", "Please enter a longer message.")]
            return

        # Convert chat format if needed: messages-style dicts -> (role, content) tuples.
        if chat_history and isinstance(chat_history[0], dict):
            chat_history = [(h["role"], h["content"]) for h in chat_history if "role" in h and "content" in h]

        response = ""
        for chunk in tx_app.run_gradio_chat(
            message=message.strip(),
            history=chat_history,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            max_token=max_tokens,
            call_agent=multi_agent,
            conversation=conversation_state,
            max_round=max_round,
            seed=42,
        ):
            # Chunks may be dicts ({"content": ...}), plain strings, or
            # arbitrary objects; accumulate them all as text.
            if isinstance(chunk, dict):
                response += chunk.get("content", "")
            elif isinstance(chunk, str):
                response += chunk
            else:
                response += str(chunk)

            # Stream the partial response after each chunk.
            # NOTE(review): the Chatbot is declared type="messages", which
            # expects {"role", "content"} dicts — tuple pairs like these may
            # be rejected by that format; confirm against the Gradio version.
            yield chat_history + [("user", message), ("assistant", response)]

    except Exception as e:
        logger.error(f"Respond error: {e}")
        yield chat_history + [("", f"⚠️ Error: {e}")]
48
-
49
# ========== Gradio UI ==========
with gr.Blocks(title="TxAgent Biomedical Assistant") as app:
    gr.Markdown("# 🧠 TxAgent Biomedical Assistant")

    # Conversation display; "messages" mode expects {"role", "content"} dicts.
    chatbot = gr.Chatbot(label="Conversation", height=600, type="messages")

    msg = gr.Textbox(
        label="Your medical query",
        placeholder="Enter your biomedical question...",
        lines=3
    )

    # Generation controls, laid out on one row.
    with gr.Row():
        temp = gr.Slider(0, 1, value=0.3, label="Temperature")
        max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
        max_tokens = gr.Slider(128, 81920, value=81920, label="Max Total Tokens")
        max_rounds = gr.Slider(1, 30, value=10, label="Max Rounds")
        multi_agent = gr.Checkbox(label="Multi-Agent Mode")

    submit = gr.Button("Submit")
    clear = gr.Button("Clear")
    conversation_state = gr.State([])

    # Both the Submit button and pressing Enter in the textbox run respond().
    submit.click(
        respond,
        [msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, conversation_state, max_rounds],
        chatbot
    )
    clear.click(lambda: [], None, chatbot)
    msg.submit(
        respond,
        [msg, chatbot, temp, max_new_tokens, max_tokens, multi_agent, conversation_state, max_rounds],
        chatbot
    )

    def load_model():
        """Import heavy dependencies and build the global TxAgent instance.

        Deferred to app start so torch/txagent are not imported at module
        load time (avoids multiprocessing conflicts). Sets the module-level
        ``tx_app`` that respond() checks.
        """
        global tx_app
        import torch
        from txagent import TxAgent
        from importlib.resources import files

        logger.info("🔧 Loading full TxAgent model...")

        # Tool definition files shipped inside the tooluniverse package.
        tool_files = {
            "opentarget": str(files('tooluniverse.data').joinpath('opentarget_tools.json')),
            "fda_drug_label": str(files('tooluniverse.data').joinpath('fda_drug_labeling_tools.json')),
            "special_tools": str(files('tooluniverse.data').joinpath('special_tools.json')),
            "monarch": str(files('tooluniverse.data').joinpath('monarch_tools.json'))
        }

        tx_app = TxAgent(
            model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B",
            rag_model_name="mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B",
            tool_files_dict=tool_files,
            enable_finish=True,
            enable_rag=True,
            enable_summary=False,
            init_rag_num=0,
            step_rag_num=10,
            summary_mode='step',
            summary_skip_last_k=0,
            summary_context_length=None,
            force_finish=True,
            avoid_repeat=True,
            seed=42,
            enable_checker=True,
            enable_chat=False,
            additional_default_tools=["DirectResponse", "RequireClarification"]
        )

        tx_app.init_model()
        logger.info("✅ Model initialized successfully")
        return gr.update(visible=False)

    # BUG FIX: the original wired `app.load(init_button.click(fn=load_model))`
    # through a hidden button. `init_button.click(...)` executes at build time
    # and returns a Dependency object, so `app.load` received a non-callable
    # and load_model never ran on startup. Register the loader directly.
    app.load(fn=load_model)
 
1
  import gradio as gr
 
2
 
3
def echo(msg, history):
    """Return *history* extended with the user's message and an echoed reply.

    BUG FIX: the Chatbot this feeds is declared with type="messages", which
    requires openai-style {"role", "content"} dicts; the original returned
    (user, bot) tuples, which that format rejects. The input history is not
    mutated — a new list is returned.
    """
    return history + [
        {"role": "user", "content": msg},
        {"role": "assistant", "content": f"Echo: {msg}"},
    ]
5
 
6
# Minimal echo UI: one chat display plus a textbox wired to echo() on Enter.
with gr.Blocks() as app:
    chat_window = gr.Chatbot(label="EchoBot", type="messages")
    user_box = gr.Textbox(label="Type a message")

    # Submitting the textbox sends (text, history) to echo and refreshes the chat.
    user_box.submit(echo, [user_box, chat_window], chat_window)