Spaces:
Sleeping
Sleeping
Fix web search failure: ManagedAgent.__init__() got an unexpected keyword argument 'llm'
Browse files- pipeline.py +4 -2
pipeline.py
CHANGED
@@ -39,6 +39,8 @@ gemini_llm = ChatGoogleGenerativeAI(
|
|
39 |
# Additional parameters or safety_settings can be added here if needed
|
40 |
)
|
41 |
|
|
|
|
|
42 |
################################################################################
|
43 |
# Pydantic Models
|
44 |
################################################################################
|
@@ -314,7 +316,7 @@ def do_cached_web_search(query: str) -> str:
|
|
314 |
try:
|
315 |
print("DEBUG: Performing a new web search...")
|
316 |
search_tool = DuckDuckGoSearchTool()
|
317 |
-
search_agent = ManagedAgent(llm=
|
318 |
new_search_result = search_agent.run(f"Search for information about: {query}")
|
319 |
|
320 |
# 3) Store in cache for future reuse
|
@@ -443,4 +445,4 @@ brand_vectorstore = build_or_load_vectorstore(brand_csv, brand_store_dir)
|
|
443 |
wellness_rag_chain = build_rag_chain(wellness_vectorstore)
|
444 |
brand_rag_chain = build_rag_chain(brand_vectorstore)
|
445 |
|
446 |
-
print("Pipeline initialized successfully! Ready to handle
|
|
|
39 |
# Additional parameters or safety_settings can be added here if needed
|
40 |
)
|
41 |
|
42 |
+
web_gemini_llm = LiteLLMModel(model_id="gemini/gemini-pro", api_key=os.environ.get("GEMINI_API_KEY"))
|
43 |
+
|
44 |
################################################################################
|
45 |
# Pydantic Models
|
46 |
################################################################################
|
|
|
316 |
try:
|
317 |
print("DEBUG: Performing a new web search...")
|
318 |
search_tool = DuckDuckGoSearchTool()
|
319 |
+
search_agent = ManagedAgent(llm=web_gemini_llm, tools=[search_tool])
|
320 |
new_search_result = search_agent.run(f"Search for information about: {query}")
|
321 |
|
322 |
# 3) Store in cache for future reuse
|
|
|
445 |
wellness_rag_chain = build_rag_chain(wellness_vectorstore)
|
446 |
brand_rag_chain = build_rag_chain(brand_vectorstore)
|
447 |
|
448 |
+
print("Pipeline initialized successfully! Ready to handle queries with caching.")
|