updates for HHEM
- agent.py +2 -2
- requirements.txt +1 -1
- st_app.py +9 -2
agent.py
CHANGED
@@ -33,7 +33,7 @@ tickers = {
     "STT": "State Street",
     "BK": "Bank of New York Mellon",
 }
-years = list(range(2015,
+years = list(range(2015, 2026))
 initial_prompt = "How can I help you today?"


@@ -206,6 +206,6 @@ def get_agent_config() -> OmegaConf:
         'examples': os.environ.get('QUERY_EXAMPLES', None),
         'demo_name': "finance-chat",
         'demo_welcome': "Financial Assistant demo.",
-        'demo_description': f"This assistant can help you with any questions about the financials of several companies:\n\n **{companies}**.\n"
+        'demo_description': f"This assistant can help you with any questions about the financials of several companies:\n\n **{companies}**.\n",
     })
     return cfg
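For context, a minimal sketch of what the two changed lines evaluate to. The abbreviated tickers dict below is a stand-in for the full one in agent.py, and only the config keys touched by this commit are shown:

from omegaconf import OmegaConf

# Stand-in for the full tickers dict in agent.py (abbreviated here).
tickers = {"STT": "State Street", "BK": "Bank of New York Mellon"}
companies = ", ".join(tickers.values())

# range() excludes its stop value, so the new bound covers 2015 through 2025.
years = list(range(2015, 2026))

cfg = OmegaConf.create({
    'demo_name': "finance-chat",
    'demo_welcome': "Financial Assistant demo.",
    # Trailing comma added by this commit; harmless, and keeps future diffs clean.
    'demo_description': f"This assistant can help you with any questions about the financials of several companies:\n\n **{companies}**.\n",
})

print(years[-1])            # 2025
print(cfg.demo_description)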
requirements.txt
CHANGED
@@ -5,5 +5,5 @@ streamlit_feedback==0.1.3
 uuid==1.30
 langdetect==1.0.9
 langcodes==3.4.0
-vectara-agentic==0.2.
+vectara-agentic==0.2.21
 torch==2.6.0
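A quick way to confirm the bumped pin took effect in the Space's environment, using only the standard library (Python 3.8+):

from importlib.metadata import version

# Should print 0.2.21 once the updated requirements.txt is installed.
print(version("vectara-agentic"))
print(version("torch"))  # 2.6.0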
st_app.py
CHANGED
@@ -147,6 +147,11 @@ async def launch_bot():
             st.session_state.status = st.status('Processing...', expanded=False)
             response = await st.session_state.agent.achat(st.session_state.prompt)
             res = escape_dollars_outside_latex(response.response)
+            fcs = None
+            try:
+                fcs = response.metadata.get('fcs', None)
+            except Exception as e:
+                pass

             #from vectara_agentic.sub_query_workflow import SequentialSubQuestionsWorkflow
             #response = await st.session_state.agent.run(inputs=SequentialSubQuestionsWorkflow.InputsModel(query=st.session_state.prompt))
@@ -158,8 +163,10 @@ async def launch_bot():
             #res = await st.session_state.agent.astream_chat(st.session_state.prompt)
             #response = ''.join([token async for token in res.async_response_gen()])
             #res = escape_dollars_outside_latex(response)
-
-            message = {"role": "assistant", "content": res, "avatar": '🤖'}
+            if fcs is not None:
+                message = {"role": "assistant", "content": res + f"\n\n**Factual Consistency Score**: {fcs}", "avatar": '🤖'}
+            else:
+                message = {"role": "assistant", "content": res, "avatar": '🤖'}
             st.session_state.messages.append(message)

             send_amplitude_data(
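The net effect of the st_app.py change: when the response metadata carries an 'fcs' key (presumably the factual consistency score that HHEM produces), it is appended to the assistant message; otherwise the message is built as before. A standalone sketch of that logic, with a hypothetical helper and with metadata standing in for response.metadata from vectara-agentic:

def format_assistant_message(res: str, metadata: dict) -> dict:
    # Mirror of the diff above: read the score defensively, then build the message.
    fcs = None
    try:
        fcs = metadata.get('fcs', None)
    except Exception:
        # Metadata may be missing or not dict-like; fall back to no score.
        pass
    if fcs is not None:
        content = res + f"\n\n**Factual Consistency Score**: {fcs}"
    else:
        content = res
    return {"role": "assistant", "content": content, "avatar": '🤖'}

# With a score in the metadata, the score line is appended:
print(format_assistant_message("Revenue grew 8% in FY2024.", {"fcs": 0.91}))
# Without one, the message is returned unchanged:
print(format_assistant_message("Revenue grew 8% in FY2024.", {}))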