Ali2206 committed on
Commit
338b5ef
·
verified ·
1 Parent(s): c02339c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -37
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import os
2
  import sys
3
- import random
4
  import gradio as gr
5
  from multiprocessing import freeze_support
6
  import importlib
@@ -14,7 +13,6 @@ import txagent.txagent
14
  importlib.reload(txagent.txagent)
15
  from txagent.txagent import TxAgent
16
 
17
- # === Debug print
18
  print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
19
  print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
20
 
@@ -23,28 +21,27 @@ current_dir = os.path.abspath(os.path.dirname(__file__))
23
  os.environ["MKL_THREADING_LAYER"] = "GNU"
24
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
25
 
26
- # === Model config
27
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
28
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
29
  new_tool_files = {
30
  "new_tool": os.path.join(current_dir, "data", "new_tool.json")
31
  }
32
 
33
- # === Example prompts
34
  question_examples = [
35
  ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
36
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
37
  ]
38
 
39
- # === Helper: Add collapsible formatting
40
  def format_collapsible_response(content):
41
  return (
42
- f"<details style='border: 1px solid #ccc; padding: 8px; margin-top: 8px;'>"
43
- f"<summary style='font-weight: bold;'>Answer</summary>"
44
- f"<div style='margin-top: 8px;'>{content}</div></details>"
45
  )
46
 
47
- # === UI creation
48
  def create_ui(agent):
49
  with gr.Blocks() as demo:
50
  gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
@@ -61,30 +58,34 @@ def create_ui(agent):
61
  message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
62
  send_button = gr.Button("Send", variant="primary")
63
 
64
- # === Core handler (streaming generator)
65
  def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
66
- generator = agent.run_gradio_chat(
67
- message=message,
68
- history=history,
69
- temperature=temperature,
70
- max_new_tokens=max_new_tokens,
71
- max_token=max_tokens,
72
- call_agent=multi_agent,
73
- conversation=conversation,
74
- max_round=max_round
75
- )
76
-
77
- for update in generator:
78
- formatted_messages = []
79
- for m in update:
80
- role = m["role"] if isinstance(m, dict) else getattr(m, "role", "assistant")
81
- content = m["content"] if isinstance(m, dict) else getattr(m, "content", "")
82
- if role == "assistant":
83
- content = format_collapsible_response(content)
84
- formatted_messages.append({"role": role, "content": content})
85
- yield formatted_messages
86
-
87
- # === Trigger handlers
 
 
 
 
88
  send_button.click(
89
  fn=handle_chat,
90
  inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
@@ -98,15 +99,13 @@ def create_ui(agent):
98
  )
99
 
100
  gr.Examples(examples=question_examples, inputs=message_input)
101
-
102
  gr.Markdown("**DISCLAIMER**: This demo is for research purposes only and does not provide medical advice.")
103
 
104
  return demo
105
 
106
- # === Startup
107
  if __name__ == "__main__":
108
  freeze_support()
109
-
110
  try:
111
  agent = TxAgent(
112
  model_name=model_name,
@@ -116,7 +115,7 @@ if __name__ == "__main__":
116
  enable_checker=True,
117
  step_rag_num=10,
118
  seed=100,
119
- additional_default_tools=[] # Removed DirectResponse/RequireClarification to avoid errors
120
  )
121
  agent.init_model()
122
 
@@ -127,5 +126,5 @@ if __name__ == "__main__":
127
  demo.launch(show_error=True)
128
 
129
  except Exception as e:
130
- print(f"❌ Application failed to start: {e}")
131
  raise
 
1
  import os
2
  import sys
 
3
  import gradio as gr
4
  from multiprocessing import freeze_support
5
  import importlib
 
13
  importlib.reload(txagent.txagent)
14
  from txagent.txagent import TxAgent
15
 
 
16
  print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
17
  print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
18
 
 
21
  os.environ["MKL_THREADING_LAYER"] = "GNU"
22
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
23
 
24
+ # === Configs
25
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
26
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
27
  new_tool_files = {
28
  "new_tool": os.path.join(current_dir, "data", "new_tool.json")
29
  }
30
 
 
31
  question_examples = [
32
  ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
33
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
34
  ]
35
 
36
# === Format output in collapsible panels
def format_collapsible_response(content):
    """Wrap *content* in an expanded HTML <details> panel labelled "Answer".

    The panel renders open by default; *content* is whitespace-stripped
    before being embedded. NOTE(review): content is interpolated as raw
    HTML, not escaped — assumed trusted model output; confirm upstream.
    """
    body = content.strip()
    pieces = [
        "<details open style='border: 1px solid #ccc; padding: 8px; margin-top: 8px; border-radius: 6px;'>",
        "<summary style='font-weight: bold; font-size: 16px;'>Answer</summary>",
        f"<div style='margin-top: 10px; line-height: 1.6;'>{body}</div></details>",
    ]
    return "".join(pieces)
43
 
44
+ # === UI
45
  def create_ui(agent):
46
  with gr.Blocks() as demo:
47
  gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
 
58
  message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
59
  send_button = gr.Button("Send", variant="primary")
60
 
61
# === Chat handler
def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
    """Stream chatbot updates from the agent, wrapping assistant replies in collapsible panels.

    Yields lists of ``{"role": ..., "content": ...}`` dicts for the Gradio
    chatbot. Any exception is caught at this UI boundary and surfaced to the
    user as a single assistant-role error message appended to *history*.
    """
    try:
        stream = agent.run_gradio_chat(
            message=message,
            history=history,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            max_token=max_tokens,  # NOTE: the agent API spells this keyword 'max_token'
            call_agent=multi_agent,
            conversation=conversation,
            max_round=max_round
        )
        for update in stream:
            rendered = []
            for msg in update:
                # Messages may arrive as plain dicts or as objects with
                # role/content attributes; normalize to dicts for the UI.
                if isinstance(msg, dict):
                    role = msg["role"]
                    content = msg["content"]
                else:
                    role = getattr(msg, "role", "assistant")
                    content = getattr(msg, "content", "")
                if role == "assistant":
                    content = format_collapsible_response(content)
                rendered.append({"role": role, "content": content})
            yield rendered
    except Exception as e:
        print("⚠️ Error in chat handler:", e)
        yield history + [{"role": "assistant", "content": f"An error occurred: {str(e)}"}]
87
+
88
+ # === Actions
89
  send_button.click(
90
  fn=handle_chat,
91
  inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
 
99
  )
100
 
101
  gr.Examples(examples=question_examples, inputs=message_input)
 
102
  gr.Markdown("**DISCLAIMER**: This demo is for research purposes only and does not provide medical advice.")
103
 
104
  return demo
105
 
106
+ # === Entry point
107
  if __name__ == "__main__":
108
  freeze_support()
 
109
  try:
110
  agent = TxAgent(
111
  model_name=model_name,
 
115
  enable_checker=True,
116
  step_rag_num=10,
117
  seed=100,
118
+ additional_default_tools=[] # Avoid broken tools
119
  )
120
  agent.init_model()
121
 
 
126
  demo.launch(show_error=True)
127
 
128
  except Exception as e:
129
+ print(f"❌ Failed to launch app: {e}")
130
  raise