Update app.py
app.py CHANGED
@@ -349,9 +349,8 @@ llm = ChatOpenAI(
     max_tokens=None,
     timeout=None,
     max_retries=5
-)
-
-template="""
+)
+template="""
 You are JobAI. You process user input and generate human-like responses to assist with job searching and application processes. Tasks include:
 (provide links where ever required)
 - Searching for job openings based on user criteria
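A note on the template this hunk repositions: `str.format` treats `{{query}}` as an escaped brace pair, so it renders as the literal text `{query}` and any argument passed to `format` is silently dropped. A minimal sketch of a placeholder that actually receives the query (the `user_text` variable is hypothetical and the template body is abbreviated):

```python
# Single braces mark a real placeholder; double braces are escapes.
template = """
You are JobAI. You assist with job searching and application processes.

User Query: {query}
"""

user_text = "Find remote data-science roles"
prompt = template.format(query=user_text)  # injects the query text
```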
@@ -360,11 +359,11 @@ llm = ChatOpenAI(
 - Generating cover letters tailored to specific job openings
 - Offering interview preparation assistance
 Respond to user queries and engage in conversation to guide them through the job application process. Utilize context and understanding to provide accurate and helpful responses
-
+
 
 User Query: {{query}}
 """
-
+
 agent_tools = [ipc_tool, crpc_tool, doj_tool, faq_tool]
 
 agent = initialize_agent(
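For reference, `handle_parsing_errors` is an option of the `AgentExecutor` that `initialize_agent` returns, so it is usually set at construction time rather than passed to `invoke` as the last hunk below does. A sketch under that assumption, reusing the `llm` and `agent_tools` names from app.py:

```python
from langchain.agents import AgentType, initialize_agent

# Sketch: setting handle_parsing_errors on the executor itself lets it
# recover when the LLM emits output the agent cannot parse.
agent = initialize_agent(
    tools=agent_tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True,
)
```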
@@ -380,7 +379,7 @@ agent = initialize_agent(
 def encode_image_to_base64(image_path):
     return pytesseract.image_to_string(Image.open(image_path))
 def chatbot_response(history,query):
-
+    print(history)
     if query.get('files'):
         # Encode image to base64
         image_data=""
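An aside on the helper in this hunk: despite its name, `encode_image_to_base64` does not base64-encode anything; it runs OCR and returns the extracted text. A sketch of the same helper under a hypothetical name that matches its behavior:

```python
import pytesseract
from PIL import Image

def extract_text_from_image(image_path):
    # pytesseract.image_to_string performs OCR on the opened image
    # and returns the recognized text (possibly with misreadings).
    return pytesseract.image_to_string(Image.open(image_path))
```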
@@ -390,17 +389,17 @@ def chatbot_response(history,query):
         # Create a multimodal message with both text and image data
         message = HumanMessage(
             content=[
-                {"type": "text", "text": query['text'] +" System :Image(s) was added to this prompt by this user. Text Extracted from this image (Some words may be misspelled ,Use your understanding ):"+image_data}, # Add text input
+                {"type": "text", "text": template.format(query['text'] +" System :Image(s) was added to this prompt by this user. Text Extracted from this image (Some words may be misspelled ,Use your understanding ):"+image_data)}, # Add text input
 
             ]
         )
         #k+=" System :Image(s) was added to this prompt by this user. Text Extracted from this image (Some words may be misspelled ,Use your understanding ):"+image_data
     else:
         # If no image, only pass the text
-        message = HumanMessage(content=[{"type": "text", "text": query}])
+        message = HumanMessage(content=[{"type": "text", "text": template.format(query)}])
 
     # Invoke the model with the multimodal message
-    result = agent.invoke(
+    result = agent.invoke([message],handle_parsing_errors=True)
     response = result['output']
     intermediate_steps = result.get('intermediate_steps', [])
 
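Putting this hunk together, a condensed sketch of the new flow in `chatbot_response` (assuming the `template` and `agent` from earlier in app.py, with the single-brace `{query}` placeholder sketched above; an `AgentExecutor` conventionally takes a dict keyed by `input`, and `intermediate_steps` is only populated when the executor is built with `return_intermediate_steps=True`):

```python
def respond(user_text, image_data=""):
    # Fold any OCR output into the prompt text, as the diff does.
    if image_data:
        user_text += (" System: image(s) were attached; text extracted from "
                      "them (some words may be misread): " + image_data)
    prompt = template.format(query=user_text)

    # The diff passes a list with one HumanMessage; an AgentExecutor built by
    # initialize_agent conventionally takes a dict keyed by "input".
    result = agent.invoke({"input": prompt})
    return result["output"], result.get("intermediate_steps", [])
```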