Kate Forsberg committed
Commit · d4e00f6 · 1 Parent(s): 67a06c2

Talks to other agent

Files changed:
- app.py (+136 -31)
- feclx5wyaca.json (+0 -1)
- mjcgsn7aj7.json (+0 -1)
- poetry.lock (+0 -0)
- pyproject.toml (+4 -3)
app.py CHANGED
@@ -1,34 +1,37 @@
+import glob
 import gradio as gr
 from uuid import uuid4 as uuid
 from huggingface_hub import HfApi
 from typing import Any
-from
-from griptape.
-from griptape.
+from dotenv import load_dotenv
+from griptape.structures import Agent
+from griptape.tasks import PromptTask, StructureRunTask, ToolkitTask
+from griptape.drivers import LocalConversationMemoryDriver, GriptapeCloudStructureRunDriver, GriptapeCloudEventListenerDriver, LocalFileManagerDriver, LocalStructureRunDriver
 from griptape.memory.structure import ConversationMemory
-from griptape.tools import
-from griptape.rules import Rule
+from griptape.tools import StructureRunClient, TaskMemoryClient, FileManager
+from griptape.rules import Rule, Ruleset
+from griptape.config import AnthropicStructureConfig
+from griptape.events import EventListener, FinishStructureRunEvent
 import time
+import os
 
 
-#
-
-# Can add a .env that has session ID and use Redis for conversation memory
+#Load environment variables
+load_dotenv()
 
-repo_id = "kateforsberg/gradio-test"
-api = HfApi()
-#api.add_space_secret(repo_id=repo_id, key='OPENAI_API_KEY', value='sk-1234')
-# Could this create a variable that's unique to each session?
-#api.add_space_variable(repo_id=repo_id, key='SESSION_ID', value=str(uuid()))
 
-
+#Create an agent that will create a prompt that can be used as input for the query agent from the Griptape Cloud.
 
+#Function that logs user history - adds to history parameter of Gradio
+#TODO: Figure out the exact use of this function
 def user(user_message, history):
     history.append([user_message, None])
     return ("", history)
 
+#Function that logs bot history - adds to the history parameter of Gradio
+#TODO: Figure out the exact use of this function
 def bot(history):
-    response =
+    response = send_message(history[-1][0])
     history[-1][1] = ""
     for character in response:
         history[-1][1] += character
@@ -37,20 +40,117 @@ def bot(history):
 
     yield history
 
-
+
+def create_prompt_task(session_id:str, message:str) -> PromptTask:
+    return PromptTask(
+        f"""
+        Re-structure the values from the user's questions: '{message}' and the input value from the conversation memory '{session_id}.json' to fit the following format. Leave out attributes that aren't important to the user:
+        years experience: <x>
+        location: <x>
+        role: <x>
+        skills: <x>
+        expected salary: <x>
+        availability: <x>
+        past companies: <x>
+        past projects: <x>
+        show reel details: <x>
+        """,
+    )
+
+def build_talk_agent(session_id:str,message:str) -> Agent:
+    ruleset = Ruleset(
+        name="Local Gradio Agent",
+        rules=[
+            Rule(
+                value = "You are responsible for structuring a user's questions into a specific format for a query."
+            ),
+            Rule(
+                value = "You ask the user follow-up questions to fill in missing information for the format you are trying to fit."
+            ),
+            Rule(
+                value="If the user has no preference for a specific attribute, then you can remove it from the query."
+            ),
+            Rule(
+                value="Only return the current query structure and any questions to fill in missing information."
+            ),
+        ]
+    )
+    file_manager_tool = FileManager(
+        name="FileManager",
+        file_manager_driver=LocalFileManagerDriver(),
+        off_prompt=False
+    )
+
+    return Agent(
+        config= AnthropicStructureConfig(),
+        conversation_memory=ConversationMemory(
+            driver=LocalConversationMemoryDriver(
+                file_path=f'{session_id}.json'
+            )),
+        tools=[file_manager_tool],
+        tasks=[create_prompt_task(session_id,message)],
+        rulesets=[ruleset],
+    )
+
+
+
+# Creates an agent for each run
+# The agent uses local memory, which it differentiates between by session_hash.
+def build_agent(session_id:str,message:str) -> Agent:
+
+    ruleset = Ruleset(
+        name="Local Gradio Agent",
+        rules=[
+            Rule(
+                value = "You are responsible for structuring a user's questions into a specific format for a query and then querying."
+            ),
+            Rule(
+                value="Only return the result of the query, do not provide additional commentary."
+            ),
+            Rule(
+                value="Only perform one task at a time."
+            ),
+            Rule(
+                value="Do not perform the query unless the user has said 'Done' with formulating."
+            ),
+            Rule(
+                value="Only perform the query with the proper query structure."
+            ),
+            Rule(
+                value="If you reformulate the query, then you must ask the user if they are 'Done' again."
+            )
+        ]
+    )
+
+
+    query_client = StructureRunClient(
+        name="QueryResumeSearcher",
+        description="Use it to search for a candidate with the query.",
+        driver = GriptapeCloudStructureRunDriver(
+            base_url=os.getenv("BASE_URL"),
+            structure_id=os.getenv("GT_STRUCTURE_ID"),
+            api_key=os.getenv("GT_CLOUD_API_KEY"),
+            structure_run_wait_time_interval=5,
+            structure_run_max_wait_time_attempts=30
+        ),
+    )
+
+    talk_client = StructureRunClient(
+        name="FormulateQueryFromUser",
+        description="Used to formulate a query from the user's input.",
+        driver=LocalStructureRunDriver(
+            structure_factory_fn=lambda: build_talk_agent(session_id,message),
+        )
+    )
+
     return Agent(
-
-
-
-
-
-
-
-        value = "You are an intelligent agent tasked with answering questions."
-    ),
-    Rule(
-        value = "All of your responses are less than 5 sentences."
-    ) ]
+        config= AnthropicStructureConfig(),
+        conversation_memory=ConversationMemory(
+            driver=LocalConversationMemoryDriver(
+                file_path=f'{session_id}.json'
+            )),
+        tools=[talk_client,query_client],
+        rulesets=[ruleset],
     )
 
 
@@ -58,14 +158,19 @@ def build_agent(session_id:str):
 def send_message(message:str, history, request:gr.Request) -> Any:
     if request:
         session_hash = request.session_hash
-    agent = build_agent(session_hash)
+    agent = build_agent(session_hash,message)
     response = agent.run(message)
    return response.output.value
 
 demo = gr.ChatInterface(
     fn=send_message,
 )
-demo.launch(share=True)
+demo.launch(share=True, auth=("griptape","griptaper"))
 
-
+json_files = glob.glob("*.json")
+for f in json_files:
+    try:
+        os.remove(f)
+    except OSError as e:
+        continue
 
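For reference, a minimal sketch (not part of this commit) of driving the same two-agent flow from a plain script instead of the Gradio UI, reusing only constructors that already appear in the diff above. It assumes the same .env variables the diff reads (BASE_URL, GT_STRUCTURE_ID, GT_CLOUD_API_KEY) plus an Anthropic key for AnthropicStructureConfig; the example prompt is invented.

import os
from uuid import uuid4

from dotenv import load_dotenv
from griptape.config import AnthropicStructureConfig
from griptape.drivers import GriptapeCloudStructureRunDriver, LocalConversationMemoryDriver
from griptape.memory.structure import ConversationMemory
from griptape.structures import Agent
from griptape.tools import StructureRunClient

load_dotenv()

# Stand-in for Gradio's request.session_hash used in send_message().
session_id = uuid4().hex

# Same Griptape Cloud query tool that build_agent() wires up in the diff.
query_client = StructureRunClient(
    name="QueryResumeSearcher",
    description="Use it to search for a candidate with the query.",
    driver=GriptapeCloudStructureRunDriver(
        base_url=os.getenv("BASE_URL"),
        structure_id=os.getenv("GT_STRUCTURE_ID"),
        api_key=os.getenv("GT_CLOUD_API_KEY"),
    ),
)

# Wrapper agent with per-session local conversation memory, as in the diff.
agent = Agent(
    config=AnthropicStructureConfig(),
    conversation_memory=ConversationMemory(
        driver=LocalConversationMemoryDriver(file_path=f"{session_id}.json")
    ),
    tools=[query_client],
)

print(agent.run("5 years experience, animator role, based in Portland. Done.").output.value)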
feclx5wyaca.json DELETED
@@ -1 +0,0 @@
-{"type": "ConversationMemory", "runs": [{"type": "Run", "id": "68795eba02cc4f2dafb58e2ae39b50a6", "input": {"type": "TextArtifact", "id": "ea12e13253e442eeb788a68e5ab2180c", "reference": null, "meta": {}, "name": "ea12e13253e442eeb788a68e5ab2180c", "value": "what are you"}, "output": {"type": "TextArtifact", "id": "e97d2f0bfe21482e83f8f8c73c999242", "reference": null, "meta": {}, "name": "e97d2f0bfe21482e83f8f8c73c999242", "value": "I am an intelligent agent designed to assist with answering questions and performing tasks such as calculations. How can I help you today?"}}, {"type": "Run", "id": "631c4709041841d5bd6e6f84793f8833", "input": {"type": "TextArtifact", "id": "7ddd118d20604c4d85050ca42e6a6d67", "reference": null, "meta": {}, "name": "7ddd118d20604c4d85050ca42e6a6d67", "value": "what was the last thing i said"}, "output": {"type": "TextArtifact", "id": "b73f2fc05c1c4df9a48a3ce6b360f2ab", "reference": null, "meta": {}, "name": "b73f2fc05c1c4df9a48a3ce6b360f2ab", "value": "You asked, \"what are you?\""}}, {"type": "Run", "id": "f317d2f9f4f742cfb3ecf1458d9107cb", "input": {"type": "TextArtifact", "id": "a8868bf90080409b9cb74b590a5f033e", "reference": null, "meta": {}, "name": "a8868bf90080409b9cb74b590a5f033e", "value": "ok lit"}, "output": {"type": "TextArtifact", "id": "34960ba434fb4a488d5d99ea1e8098cc", "reference": null, "meta": {}, "name": "34960ba434fb4a488d5d99ea1e8098cc", "value": "Great! How can I assist you further?"}}], "max_runs": null}
mjcgsn7aj7.json DELETED
@@ -1 +0,0 @@
-{"type": "ConversationMemory", "runs": [{"type": "Run", "id": "b20024cc07d44cfca53911240a680f37", "input": {"type": "TextArtifact", "id": "9c9f57c1527542a69627a5be494dcb40", "reference": null, "meta": {}, "name": "9c9f57c1527542a69627a5be494dcb40", "value": "hi there"}, "output": {"type": "TextArtifact", "id": "ca15b8187e684e8597e27ce0eb9d4d60", "reference": null, "meta": {}, "name": "ca15b8187e684e8597e27ce0eb9d4d60", "value": "Hello! How can I assist you today?"}}, {"type": "Run", "id": "30adf7f962b24a619a3009684b069de2", "input": {"type": "TextArtifact", "id": "10fab83b16a54cbb9d5456d8bb32b3f3", "reference": null, "meta": {}, "name": "10fab83b16a54cbb9d5456d8bb32b3f3", "value": "what was the last thing i said"}, "output": {"type": "TextArtifact", "id": "349771f62bff442184c8825b42dc6d9c", "reference": null, "meta": {}, "name": "349771f62bff442184c8825b42dc6d9c", "value": "You said, \"hi there.\" How can I help you further?"}}, {"type": "Run", "id": "9da15db3d45547e8a5bed2ed2a904afc", "input": {"type": "TextArtifact", "id": "9bbe11989f7a40f7a9146b10c42fd857", "reference": null, "meta": {}, "name": "9bbe11989f7a40f7a9146b10c42fd857", "value": "what was the last thing i said"}, "output": {"type": "TextArtifact", "id": "cafc351aa90e4fac8295c1109154dcec", "reference": null, "meta": {}, "name": "cafc351aa90e4fac8295c1109154dcec", "value": "You said, \"what was the last thing i said.\" How can I assist you further?"}}], "max_runs": null}
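The two deleted files above are per-session ConversationMemory dumps: LocalConversationMemoryDriver writes each session's runs to '<session_hash>.json', and the cleanup loop added at the end of app.py deletes any leftover *.json files once demo.launch() returns. A small sketch of how such a file gets produced (assumes a prompt-driver API key is configured, since Agent falls back to the framework's default prompt driver when no config is passed):

from griptape.drivers import LocalConversationMemoryDriver
from griptape.memory.structure import ConversationMemory
from griptape.structures import Agent

# Persist this session's runs to a JSON file named after the session.
memory = ConversationMemory(
    driver=LocalConversationMemoryDriver(file_path="demo-session.json")
)
agent = Agent(conversation_memory=memory)

# After these runs, demo-session.json holds a dump in the same shape as the
# deleted files: {"type": "ConversationMemory", "runs": [...]}.
agent.run("hi there")
agent.run("what was the last thing i said")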
poetry.lock CHANGED
The diff for this file is too large to render. See raw diff.
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "griptape-chat"
-version = "0.
+version = "0.1.0"
 description = "Chat demo using the Griptape Framework and Gradio."
 authors = ["Griptape <[email protected]>"]
 license = "Apache 2.0"
@@ -12,14 +12,15 @@ package-mode = false
 python = "^3.11"
 python-dotenv = "^1.0.0"
 gradio = "^4.37.1"
-griptape = "^0.
+griptape = {extras = ["drivers-embedding-voyageai","drivers-prompt-anthropic"], version = "^0.29.1"}
 attrs = "^23.1.0"
-pypdf = "^4.2.0"
 pymupdf = "^1.24.7"
+argparse = "^1.4.0"
 azure-identity = "^1.17.1"
 
 
 
+
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
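The drivers-prompt-anthropic extra pinned above is what lets the AnthropicStructureConfig used in app.py resolve an Anthropic prompt driver. A quick post-install sanity check (a sketch, not from the commit; the prompt_driver attribute name reflects griptape ~0.29 and is an assumption here):

from importlib.metadata import version

from griptape.config import AnthropicStructureConfig

print("griptape", version("griptape"))                           # expect a 0.29.x build per pyproject.toml
print(type(AnthropicStructureConfig().prompt_driver).__name__)   # expected: AnthropicPromptDriver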