simran0608 committed
Commit c48c29b · verified · 1 Parent(s): 81693ce

Update model.py

Files changed (1): model.py (+40 −4)
model.py CHANGED
@@ -1,39 +1,75 @@
+
+
 import os
 import logging
 from config import MODEL_NAME
 from dotenv import load_dotenv
 from langchain_groq import ChatGroq
 from langchain.agents import AgentExecutor
+from langchain.memory import ConversationBufferWindowMemory
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain.agents import AgentExecutor
 from langchain.agents import create_tool_calling_agent
 from langchain_core.utils.function_calling import convert_to_openai_function
 from utils import book_slot, check_slots, reschedule_event, delete_event
 
-
 load_dotenv()
 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
 
 API_KEY = os.environ["API_KEY"]
+
 def create_agent(PROMPT):
+    # First create the memory object
+    memory = ConversationBufferWindowMemory(
+        memory_key="chat_history",
+        return_messages=True,
+        k=5
+    )
+
+    # Create the prompt template
     prompt_template = ChatPromptTemplate.from_messages([
         ("system", PROMPT),
+        MessagesPlaceholder(variable_name="chat_history"),
         ("human", "{input}"),
         MessagesPlaceholder(variable_name="agent_scratchpad"),
     ])
 
+    # Define tools and convert to functions
     tools = [book_slot, delete_event, check_slots, reschedule_event]
     functions = [convert_to_openai_function(f) for f in tools]
 
+    # Create the LLM instance separately
     llm = ChatGroq(
         model=MODEL_NAME,
         temperature=0.7,
         max_tokens=None,
-        timeout=None,
+        timeout=60,
         max_retries=2,
         api_key=API_KEY
     ).bind_functions(functions=functions)
 
+    # Create the agent
     agent = create_tool_calling_agent(llm, tools, prompt_template)
-    agent_executor = AgentExecutor(agent=agent, tools=tools)
+
+    # Create the agent executor with memory
+    agent_executor = AgentExecutor(
+        agent=agent,
+        tools=tools,
+        memory=memory,
+        verbose=True
+    )
+
     return agent_executor
+
+# Example usage
+def process_query(query: str):
+    try:
+        agent = create_agent("Your system prompt here")
+        response = agent.invoke(
+            {
+                "input": query,
+            }
+        )
+        return response
+    except Exception as e:
+        logging.error(f"Error during query processing: {str(e)}")
+        raise
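For context, a minimal usage sketch of the memory-enabled executor (the system prompt text and the sample queries are illustrative assumptions, not part of this commit). Because process_query builds a fresh agent, and therefore a fresh ConversationBufferWindowMemory, on every call, the window memory only accumulates when a single executor is reused across turns, as below:

# Minimal usage sketch (assumed prompt text and queries; the agent is built once
# and reused so ConversationBufferWindowMemory can feed the last k=5 exchanges
# back into the "chat_history" placeholder on each turn).
from model import create_agent

agent_executor = create_agent(
    "You are a scheduling assistant that books, checks, reschedules and deletes calendar slots."
)

# Turn 1: chat_history starts empty.
first = agent_executor.invoke({"input": "Which slots are free tomorrow afternoon?"})

# Turn 2: the previous exchange is injected via chat_history, so a follow-up
# like "the 3 pm one" can be resolved from context.
second = agent_executor.invoke({"input": "Book the 3 pm one, please."})

print(second["output"])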