Commit f52aa71
Parent(s): 90f1f18

Update: full intake tool set

Files changed:
- modules/nodes.py    +1 -1
- modules/tools.py    +77 -5
- requirements.txt    +7 -44
modules/nodes.py

@@ -104,7 +104,7 @@ def chatbot_with_tools(state: DataState) -> DataState:
         "plan": {
             "goal": "",
             "expectation": "",
-            "
+            "alternative_treatment": "",
         }
     }, "finished": False}
 
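For reference, the seeded "plan" section of the intake state now carries three empty string fields. A minimal sketch of just that fragment (the rest of the dict returned by chatbot_with_tools is unchanged by this commit and omitted here):

    # Sketch of the default "plan" slot only; field names come from the hunk above.
    # The values stay empty until the plan tool call is handled in data_node
    # (see the modules/tools.py diff below), which copies tool_call["args"] into them.
    plan_defaults = {
        "goal": "",
        "expectation": "",
        "alternative_treatment": "",  # new field introduced by this commit
    }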
modules/tools.py

@@ -46,7 +46,7 @@ def pain(pain_location: str, pain_side: str, pain_intensity: int, pain_descripti
     Returns:
         The updated data with the patient's pain status added.
     """
-
+
 @tool
 def medical_hist(medical_condition: str, first_time: str, surgery_history: list, medication: str, allergy: str) -> str:
     """Collecting patient's medical history including:

@@ -94,6 +94,40 @@ def review_system(weight_change: str, fever: bool, chill: bool, night_sweats: bo
         The updated data with the patient's review.
     """
 
+@tool
+def pain_manage(pain_medication: str, specialist: bool, other_therapy: str, effectiveness: bool) -> str:
+    """Collecting patient's pain management including:
+    - Current pain medications
+    - Specialist consultations
+    - Alternative therapies
+    - Treatment effectiveness
+
+    Returns:
+        The updated data with the patient's pain management.
+    """
+
+@tool
+def functional(life_quality: str, limit_activity: str, mood: str) -> str:
+    """Collecting patient's functional assement information including:
+    - Impact on quality of life
+    - Activity limitations
+    - Mood and emotional state
+
+    Returns:
+        The updated data with the patient's functional assessment information.
+    """
+
+@tool
+def plan(goal: str, expectation: str, alternative_treatment: str) -> str:
+    """Collecting patient's future treatment plan information including:
+    - Treatment goals
+    - Patient expectations
+    - Alternative treatment considerations
+
+    Returns:
+        The updated data with the patient's future treatment plan information.
+    """
+
 @tool
 def confirm_data() -> str:
     """Asks the patient if the data intake is correct.

@@ -102,17 +136,14 @@ def confirm_data() -> str:
         The user's free-text response.
     """
 
-
 @tool
 def get_data() -> str:
     """Returns the users data so far. One item per line."""
 
-
 @tool
 def clear_data():
     """Removes all items from the user's order."""
 
-
 @tool
 def save_data() -> int:
     """Send the data into database.

@@ -121,7 +152,6 @@ def save_data() -> int:
         The status of data saving, finished.
     """
 
-
 def data_node(state: DataState) -> DataState:
     """The ordering node. This is where the dataintake is manipulated."""
     tool_msg = state.get("messages", [])[-1]

@@ -166,6 +196,48 @@ def data_node(state: DataState) -> DataState:
             data["medical_hist"]["allergy"] = tool_call["args"]["allergy"]
             response = "\n".join(data)
 
+        elif tool_call["name"] == "family_hist":
+            data["family_hist"]["family_history"] = tool_call["args"]["family_history"]
+            response = "\n".join(data)
+
+        elif tool_call["name"] == "social_hist":
+            data["social_hist"]["occupation"] = tool_call["args"]["occupation"]
+            data["social_hist"]["smoke"] = tool_call["args"]["smoke"]
+            data["social_hist"]["alcohol"] = tool_call["args"]["alcohol"]
+            data["social_hist"]["drug"] = tool_call["args"]["drug"]
+            data["social_hist"]["support_system"] = tool_call["args"]["support_system"]
+            data["social_hist"]["living_condition"] = tool_call["args"]["living_condition"]
+            response = "\n".join(data)
+
+        elif tool_call["name"] == "review_system":
+            data["review_system"]["weight_change"] = tool_call["args"]["weight_change"]
+            data["review_system"]["fever"] = tool_call["args"]["fever"]
+            data["review_system"]["chill"] = tool_call["args"]["chill"]
+            data["review_system"]["night_sweats"] = tool_call["args"]["night_sweats"]
+            data["review_system"]["sleep"] = tool_call["args"]["sleep"]
+            data["review_system"]["gastrointestinal"] = tool_call["args"]["gastrointestinal"]
+            data["review_system"]["urinary"] = tool_call["args"]["urinary"]
+            response = "\n".join(data)
+
+        elif tool_call["name"] == "pain_manage":
+            data["pain_manage"]["pain_medication"] = tool_call["args"]["pain_medication"]
+            data["pain_manage"]["specialist"] = tool_call["args"]["specialist"]
+            data["pain_manage"]["other_therapy"] = tool_call["args"]["other_therapy"]
+            data["pain_manage"]["effectiveness"] = tool_call["args"]["effectiveness"]
+            response = "\n".join(data)
+
+        elif tool_call["name"] == "functional":
+            data["functional"]["life_quality"] = tool_call["args"]["life_quality"]
+            data["functional"]["limit_activity"] = tool_call["args"]["limit_activity"]
+            data["functional"]["mood"] = tool_call["args"]["mood"]
+            response = "\n".join(data)
+
+        elif tool_call["name"] == "plan":
+            data["plan"]["goal"] = tool_call["args"]["goal"]
+            data["plan"]["expectation"] = tool_call["args"]["expectation"]
+            data["plan"]["alternative_treatment"] = tool_call["args"]["alternative_treatment"]
+            response = "\n".join(data)
+
         elif tool_call["name"] == "confirm_data":
 
             # We could entrust the LLM to do order confirmation, but it is a good practice to
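Taken together, modules/tools.py now exposes one @tool per intake section (pain, medical_hist, family_hist, social_hist, review_system, pain_manage, functional, plan) plus the bookkeeping tools (confirm_data, get_data, clear_data, save_data), and data_node copies each tool call's args into the matching section of the state. The following is a minimal wiring sketch, not part of this commit, of how such a tool set is typically bound to the chat model in LangChain so that chatbot_with_tools can emit the tool calls data_node dispatches on; the import path and model name are assumptions:

    # Hypothetical wiring sketch; tool names taken from the diff above,
    # model choice and import path are assumptions.
    from langchain_google_genai import ChatGoogleGenerativeAI  # pinned in requirements.txt

    from modules.tools import (
        pain, medical_hist, family_hist, social_hist, review_system,
        pain_manage, functional, plan,
        confirm_data, get_data, clear_data, save_data,
    )

    intake_tools = [
        pain, medical_hist, family_hist, social_hist, review_system,
        pain_manage, functional, plan,
        confirm_data, get_data, clear_data, save_data,
    ]

    # bind_tools attaches the tool schemas so the model can return tool_calls
    # that data_node then routes on via tool_call["name"].
    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")  # assumed model name
    llm_with_tools = llm.bind_tools(intake_tools)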
requirements.txt

@@ -3,22 +3,12 @@ aiohttp==3.11.11
 aiosignal==1.3.2
 annotated-types==0.7.0
 anyio==4.8.0
-asttokens @ file:///opt/conda/conda-bld/asttokens_1646925590279/work
 attrs==25.1.0
-Bottleneck @ file:///croot/bottleneck_1731058641041/work
-Brotli @ file:///croot/brotli-split_1736182456865/work
 cachetools==5.5.1
 certifi==2024.12.14
 charset-normalizer==3.4.1
-comm @ file:///croot/comm_1709322850197/work
-contourpy @ file:///croot/contourpy_1732540045555/work
-cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
-debugpy @ file:///croot/debugpy_1736267418885/work
-decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
 distro==1.9.0
-executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
 filetype==1.2.0
-fonttools @ file:///croot/fonttools_1737039080035/work
 frozenlist==1.5.0
 google-ai-generativelanguage==0.6.15
 google-api-core==2.24.1

@@ -35,15 +25,13 @@ httpcore==1.0.7
 httplib2==0.22.0
 httpx==0.28.1
 idna==3.10
-ipykernel
-ipython
-jedi @ file:///croot/jedi_1733987392413/work
+ipykernel
+ipython
 jiter==0.8.2
 jsonpatch==1.33
 jsonpointer==3.0.0
-jupyter_client
-jupyter_core
-kiwisolver @ file:///croot/kiwisolver_1737039087198/work
+jupyter_client
+jupyter_core
 langchain==0.3.17
 langchain-core==0.3.33
 langchain-google-genai==2.0.9

@@ -55,63 +43,38 @@ langgraph-checkpoint==2.0.10
 langgraph-sdk==0.1.51
 langsmith==0.3.3
 matplotlib==3.10.0
-matplotlib-inline
+matplotlib-inline
 mkl-service==2.4.0
-mkl_fft @ file:///io/mkl313/mkl_fft_1730824109137/work
-mkl_random @ file:///io/mkl313/mkl_random_1730823916628/work
 msgpack==1.1.0
 multidict==6.1.0
-
-numexpr @ file:///croot/numexpr_1730215937391/work
-numpy @ file:///croot/numpy_and_numpy_base_1708638617955/work/dist/numpy-1.26.4-cp312-cp312-linux_x86_64.whl#sha256=1d700f51d8b4fa684d858c9e3b56b1656bc5c82b6b79ff08d4e3b491c430059f
+numpy==1.26.4
 ollama==0.4.7
 openai==1.60.2
 orjson==3.10.15
-
-pandas @ file:///croot/pandas_1732735089971/work/dist/pandas-2.2.3-cp312-cp312-linux_x86_64.whl#sha256=57b66702d418720ec8483f7c4ec7c08d41815316ad7ce09d5b7bbc34eefcfdfd
-parso @ file:///croot/parso_1733963305961/work
-pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
-pillow @ file:///croot/pillow_1738010226202/work
-platformdirs @ file:///work/perseverance-python-buildout/croot/platformdirs_1701732573265/work
-prompt-toolkit @ file:///croot/prompt-toolkit_1704404351921/work
+pandas
 propcache==0.2.1
 proto-plus==1.26.0
 protobuf==5.29.3
-psutil @ file:///croot/psutil_1736367091698/work
-ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
-pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
 pyasn1==0.6.1
 pyasn1_modules==0.4.1
 pydantic==2.10.6
 pydantic_core==2.27.2
-Pygments @ file:///work/perseverance-python-buildout/croot/pygments_1698846270603/work
 pyparsing==3.2.1
-python-dateutil @ file:///croot/python-dateutil_1716495738603/work
 python-dotenv==1.0.1
-pytz @ file:///croot/pytz_1713974312559/work
 PyYAML==6.0.2
-pyzmq @ file:///croot/pyzmq_1734687138743/work
 regex==2024.11.6
 requests==2.32.3
 requests-toolbelt==1.0.0
 rsa==4.9
-seaborn @ file:///croot/seaborn_1718302919398/work
 setuptools==75.8.0
-six @ file:///tmp/build/80754af9/six_1644875935023/work
 sniffio==1.3.1
 SQLAlchemy==2.0.37
-stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
 tenacity==9.0.0
 tiktoken==0.8.0
-tornado @ file:///croot/tornado_1733960490606/work
 tqdm==4.67.1
-traitlets @ file:///croot/traitlets_1718227057033/work
 typing_extensions==4.12.2
-tzdata @ file:///croot/python-tzdata_1690578112552/work
-unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
 uritemplate==4.1.1
 urllib3==2.3.0
-wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
 wheel==0.45.1
 yarl==1.18.3
 zstandard==0.23.0
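With the conda-specific "@ file:///" references removed, the requirements file can be installed into a clean environment with pip install -r requirements.txt. Note that the re-added entries (ipykernel, ipython, jupyter_client, jupyter_core, matplotlib-inline, pandas) are left unpinned, so pip will resolve them to whatever compatible versions are current at install time.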