Commit 8d52d3e (1 parent: ca26f98): Update app.py

app.py CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationChain
+from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
+from langchain.chains import LLMChain
+from langchain_community.utilities import GoogleSearchAPIWrapper
 import os
 
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
@@ -11,14 +14,46 @@ llm = ChatGoogleGenerativeAI(
     model="gemini-pro",
     temperature=0.7
 )
-conversation = ConversationChain(
-    llm=llm,
-    verbose=True,
-    memory=ConversationBufferMemory()
+
+search = GoogleSearchAPIWrapper()
+tools = [
+    Tool(
+        name="Search",
+        func=search.run,
+        description="useful for when you need to answer questions about current events",
+    )
+]
+
+prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
+suffix = """Begin!"
+
+{chat_history}
+Question: {input}
+{agent_scratchpad}"""
+
+prompt = ZeroShotAgent.create_prompt(
+    tools,
+    prefix=prefix,
+    suffix=suffix,
+    input_variables=["input", "chat_history", "agent_scratchpad"],
 )
+memory = ConversationBufferMemory(memory_key="chat_history")
+
+llm_chain = LLMChain(llm=llm, prompt=prompt)
+agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
+agent_chain = AgentExecutor.from_agent_and_tools(
+    agent=agent, tools=tools, verbose=True, memory=memory
+)
+
+
+# conversation = ConversationChain(
+#     llm=llm,
+#     verbose=True,
+#     memory=ConversationBufferMemory()
+# )
 def chat(prompt):
-    res =
-    return res,
+    res = agent_chain.run(input=prompt)
+    return res, agent_chain.memory.chat_memory.messages
 
 iface = gr.Interface(
     fn=chat,
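
The diff cuts off at "fn=chat,", but the updated chat() now returns two values (the agent's reply and the messages held in memory), so the gr.Interface it feeds presumably declares two outputs. Below is a minimal sketch of how that call could be completed; the component choices and labels are assumptions, not code from the commit. Note also that GoogleSearchAPIWrapper reads its credentials from the environment (a Google API key and, typically, a GOOGLE_CSE_ID for the Programmable Search Engine), which the Space would need to have configured.

# Sketch only, not from the commit: a plausible completion of the
# gr.Interface(...) call that the diff truncates at "fn=chat,".
# chat() returns (reply, message_list), so two output components are
# assumed; "text"/Textbox outputs render the second value via str().
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Prompt"),
    outputs=[
        gr.Textbox(label="Response"),      # first return value of chat()
        gr.Textbox(label="Chat history"),  # second return value, shown as text
    ],
)

iface.launch()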