Ali2206 committed on
Commit
537f975
·
verified ·
1 Parent(s): ea924ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -35
app.py CHANGED
@@ -6,63 +6,57 @@ from multiprocessing import freeze_support
6
  import importlib
7
  import inspect
8
 
9
- # === Path fix
10
- sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "src"))
11
 
12
- # === Reload to avoid stale module
13
  import txagent.txagent
14
  importlib.reload(txagent.txagent)
15
  from txagent.txagent import TxAgent
16
- from gradio import ChatMessage
17
 
18
- # === Debug
19
  print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
20
  print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
21
 
22
- # === Env vars
23
- current_dir = os.path.dirname(os.path.abspath(__file__))
24
  os.environ["MKL_THREADING_LAYER"] = "GNU"
25
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
26
 
27
- # === UI text
28
- DESCRIPTION = '''
29
- <h1 style="text-align: center;">TxAgent: AI for Therapeutic Reasoning</h1>
30
- '''
31
- INTRO = "Ask biomedical or therapeutic questions. Results are powered by tools and reasoning."
32
- LICENSE = "DISCLAIMER: THIS WEBSITE DOES NOT PROVIDE MEDICAL ADVICE."
33
-
34
- # === Model & tool config
35
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
36
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
37
  new_tool_files = {
38
  "new_tool": os.path.join(current_dir, "data", "new_tool.json")
39
  }
40
 
 
41
  question_examples = [
42
  ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
43
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
44
  ]
45
 
46
- # === Gradio UI
47
  def create_ui(agent):
48
  with gr.Blocks() as demo:
49
- gr.Markdown(DESCRIPTION)
50
- gr.Markdown(INTRO)
51
 
52
- temperature = gr.Slider(0, 1, step=0.1, value=0.3, label="Temperature")
53
- max_new_tokens = gr.Slider(128, 4096, step=1, value=1024, label="Max New Tokens")
54
- max_tokens = gr.Slider(128, 32000, step=1, value=8192, label="Max Total Tokens")
55
- max_round = gr.Slider(1, 50, step=1, value=30, label="Max Rounds")
56
  multi_agent = gr.Checkbox(label="Enable Multi-agent Reasoning", value=False)
57
  conversation_state = gr.State([])
58
 
59
  chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
60
- message_input = gr.Textbox(placeholder="Ask a biomedical question...", show_label=False)
61
- send_btn = gr.Button("Send", variant="primary")
62
 
 
63
  def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
64
- # Ensure response is a generator that yields list of {role, content} dictionaries
65
- return agent.run_gradio_chat(
66
  message=message,
67
  history=history,
68
  temperature=temperature,
@@ -72,8 +66,18 @@ def create_ui(agent):
72
  conversation=conversation,
73
  max_round=max_round
74
  )
75
-
76
- send_btn.click(
 
 
 
 
 
 
 
 
 
 
77
  fn=handle_chat,
78
  inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
79
  outputs=chatbot
@@ -85,18 +89,16 @@ def create_ui(agent):
85
  outputs=chatbot
86
  )
87
 
88
- gr.Examples(
89
- examples=question_examples,
90
- inputs=message_input
91
- )
92
 
93
- gr.Markdown(LICENSE)
94
 
95
  return demo
96
 
97
- # === App startup
98
  if __name__ == "__main__":
99
  freeze_support()
 
100
  try:
101
  agent = TxAgent(
102
  model_name=model_name,
@@ -117,5 +119,5 @@ if __name__ == "__main__":
117
  demo.launch(show_error=True)
118
 
119
  except Exception as e:
120
- print(f"🚨 Startup error: {e}")
121
  raise
 
6
  import importlib
7
  import inspect
8
 
9
+ # === Fix path to include src/txagent
10
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
11
 
12
+ # === Import and reload to ensure correct file
13
  import txagent.txagent
14
  importlib.reload(txagent.txagent)
15
  from txagent.txagent import TxAgent
 
16
 
17
+ # === Debug print
18
  print(">>> TxAgent loaded from:", inspect.getfile(TxAgent))
19
  print(">>> TxAgent has run_gradio_chat:", hasattr(TxAgent, "run_gradio_chat"))
20
 
21
+ # === Environment
22
+ current_dir = os.path.abspath(os.path.dirname(__file__))
23
  os.environ["MKL_THREADING_LAYER"] = "GNU"
24
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
25
 
26
+ # === Model config
 
 
 
 
 
 
 
27
  model_name = "mims-harvard/TxAgent-T1-Llama-3.1-8B"
28
  rag_model_name = "mims-harvard/ToolRAG-T1-GTE-Qwen2-1.5B"
29
  new_tool_files = {
30
  "new_tool": os.path.join(current_dir, "data", "new_tool.json")
31
  }
32
 
33
+ # === Example prompts
34
  question_examples = [
35
  ["Given a patient with WHIM syndrome on prophylactic antibiotics, is it advisable to co-administer Xolremdi with fluconazole?"],
36
  ["What treatment options exist for HER2+ breast cancer resistant to trastuzumab?"]
37
  ]
38
 
39
+ # === UI creation
40
  def create_ui(agent):
41
  with gr.Blocks() as demo:
42
+ gr.Markdown("<h1 style='text-align: center;'>TxAgent: Therapeutic Reasoning</h1>")
43
+ gr.Markdown("Ask biomedical or therapeutic questions. Powered by step-by-step reasoning and tools.")
44
 
45
+ temperature = gr.Slider(0, 1, value=0.3, label="Temperature")
46
+ max_new_tokens = gr.Slider(128, 4096, value=1024, label="Max New Tokens")
47
+ max_tokens = gr.Slider(128, 32000, value=8192, label="Max Total Tokens")
48
+ max_round = gr.Slider(1, 50, value=30, label="Max Rounds")
49
  multi_agent = gr.Checkbox(label="Enable Multi-agent Reasoning", value=False)
50
  conversation_state = gr.State([])
51
 
52
  chatbot = gr.Chatbot(label="TxAgent", height=600, type="messages")
53
+ message_input = gr.Textbox(placeholder="Ask your biomedical question...", show_label=False)
54
+ send_button = gr.Button("Send", variant="primary")
55
 
56
+ # === Core handler (streaming generator)
57
  def handle_chat(message, history, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
58
+ # Must yield a list of {"role": ..., "content": ...} dicts
59
+ generator = agent.run_gradio_chat(
60
  message=message,
61
  history=history,
62
  temperature=temperature,
 
66
  conversation=conversation,
67
  max_round=max_round
68
  )
69
+ for update in generator:
70
+ # Convert to list of dicts if not already
71
+ formatted = [
72
+ {"role": m["role"], "content": m["content"]}
73
+ if isinstance(m, dict)
74
+ else {"role": m.role, "content": m.content}
75
+ for m in update
76
+ ]
77
+ yield formatted
78
+
79
+ # === Trigger handlers
80
+ send_button.click(
81
  fn=handle_chat,
82
  inputs=[message_input, chatbot, temperature, max_new_tokens, max_tokens, multi_agent, conversation_state, max_round],
83
  outputs=chatbot
 
89
  outputs=chatbot
90
  )
91
 
92
+ gr.Examples(examples=question_examples, inputs=message_input)
 
 
 
93
 
94
+ gr.Markdown("**DISCLAIMER**: This demo is for research purposes only and does not provide medical advice.")
95
 
96
  return demo
97
 
98
+ # === Startup
99
  if __name__ == "__main__":
100
  freeze_support()
101
+
102
  try:
103
  agent = TxAgent(
104
  model_name=model_name,
 
119
  demo.launch(show_error=True)
120
 
121
  except Exception as e:
122
+ print(f" Application failed to start: {e}")
123
  raise