Upload folder using huggingface_hub
- .gitignore +1 -0
- .gradio/certificate.pem +31 -0
- company_info_search.py +206 -0
- multi_agent_swarm.py +150 -0
- readme.md +43 -0
- requirements.txt +7 -0
- wsgi.py +5 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+.env
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
company_info_search.py
ADDED
@@ -0,0 +1,206 @@
+import gradio as gr
+from swarm import Swarm, Agent
+from openai import OpenAI
+from exa_py import Exa
+import os
+from dotenv import load_dotenv
+import tweepy
+import json
+
+load_dotenv()
+
+openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+client = Swarm(client=openai_client)
+exa_client = Exa(api_key=os.getenv("EXA_API_KEY"))
+
+# Twitter API setup
+bearer_token = os.getenv('bearer_token')
+twitter_client = tweepy.Client(bearer_token=bearer_token)
+
+def search_prop_firm_info(query, num_results=5):
+    results = exa_client.search_and_contents(
+        query,
+        type="keyword",
+        num_results=int(num_results),
+        text=True,
+        start_published_date="2023-01-01",
+        category="company",
+        include_domains=["propfirmmatch.com"],
+        summary=True
+    )
+    formatted_results = []
+    for result in results.results:
+        formatted_results.append(f"Title: {result.title}\nURL: {result.url}\nSummary: {result.summary}\n")
+    return "\n".join(formatted_results)
+
+def search_trustpilot_reviews(company_name, num_results=3):
+    query = f"site:trustpilot.com {company_name} reviews"
+    results = exa_client.search_and_contents(
+        query,
+        type="keyword",
+        num_results=int(num_results),
+        text=True,
+        start_published_date="2023-01-01",
+        summary=True
+    )
+    formatted_results = []
+    for result in results.results:
+        formatted_results.append(f"Title: {result.title}\nURL: {result.url}\nSummary: {result.summary}\n")
+    return "\n".join(formatted_results)
+
+def search_tweets(query, max_results=100):
+    try:
+        formatted_query = query.replace('"', '').strip()
+        tweets = twitter_client.search_recent_tweets(query=formatted_query, max_results=max_results)
+        return tweets.data if tweets.data else []
+    except tweepy.errors.BadRequest as e:
+        print(f"BadRequest error: {e}")
+        return []
+    except tweepy.errors.TweepyException as e:
+        print(f"Tweepy error: {e}")
+        return []
+    except Exception as e:
+        print(f"Unexpected error in search_tweets: {e}")
+        return []
+
+twitter_sentiment_agent = Agent(
+    name="Twitter Sentiment Analyzer",
+    instructions="""You are an agent that analyzes Twitter sentiment for proprietary trading firms.
+    Given a list of tweets about a specific firm, analyze the overall sentiment and provide a summary.
+    Consider the following:
+    1. Overall sentiment (positive, negative, or neutral)
+    2. Common themes or topics mentioned
+    3. Any notable praise or complaints
+    4. Level of engagement (replies, retweets, likes)
+    Provide a concise summary of your findings.""",
+    functions=[search_tweets],
+)
+
+trustpilot_review_agent = Agent(
+    name="TrustPilot Review Analyzer",
+    instructions="""You are an agent that searches for and analyzes TrustPilot reviews for proprietary trading firms.
+    Use the search_trustpilot_reviews function to find reviews for a given company.
+    Your tasks are to:
+    1. Search for TrustPilot reviews using the provided company name.
+    2. Analyze the overall sentiment of the reviews (positive, negative, or mixed).
+    3. Identify common themes or recurring points in the reviews.
+    4. Note any standout positive or negative comments.
+    5. If available, mention the overall TrustPilot rating for the company.
+    6. Provide a concise summary of your findings, highlighting the most important aspects for a potential trader.
+
+    Your summary should be informative and balanced, presenting both positives and negatives if they exist.""",
+    functions=[search_trustpilot_reviews],
+)
+
+prop_firm_search_agent = Agent(
+    name="Prop Firm Search",
+    instructions="""You are an agent that searches for proprietary trading firm information on propfirmmatch.com. Use the search_prop_firm_info function to find information based on the user's query. The function takes two parameters: query (string) and num_results (integer, default 5).
+
+    If the user names a specific firm:
+    1. Use the firm's name as the search query.
+    2. Call the search_prop_firm_info function with the firm's name.
+    3. Provide a detailed summary of the information found about the firm from propfirmmatch.com.
+
+    Always include relevant details such as leverage, accepted countries, and any unique features of the firms.""",
+    functions=[search_prop_firm_info],
+)
+
+score_agent = Agent(
+    name="Score Generator",
+    instructions="""You are an agent that generates an overall score for proprietary trading firms based on the information provided.
+    Consider the following factors:
+    1. Information from propfirmmatch.com
+    2. TrustPilot reviews
+    3. Twitter sentiment
+    4. Any unique features or advantages of the firm
+
+    Generate a score out of 100, where:
+    90-100: Excellent
+    80-89: Very Good
+    70-79: Good
+    60-69: Fair
+    Below 60: Poor
+
+    Provide a brief explanation for the score.
+
+    Format your response as follows:
+    **Score:** (your score here)
+    **Twitter:** (Highlights of twitter analysis here)
+    **TrustPilot:** (Highlights / notable reviews here)
+    **PropFirmMatch:** (Summary of firm from search_prop_firm_info / propfirmmatch agent)""",
+    functions=[],
+)
+
+def fetch_twitter_sentiment(company_name, num_tweets=100):
+    query = f"{company_name} -is:retweet"
+    tweets = search_tweets(query, max_results=num_tweets)
+
+    if not tweets:
+        return "No tweets found."
+
+    tweet_texts = [tweet.text for tweet in tweets]
+    tweet_data = "\n\n".join(tweet_texts)
+
+    analysis_prompt = f"Analyze the sentiment of the following tweets about {company_name}:\n\n{tweet_data}"
+
+    sentiment_response = client.run(twitter_sentiment_agent, messages=[{"role": "user", "content": analysis_prompt}])
+    sentiment_analysis = sentiment_response.messages[-1]["content"] if sentiment_response.messages else "No sentiment analysis available."
+
+    return sentiment_analysis
+
+def search_prop_firms(query):
+    search_response = client.run(prop_firm_search_agent, messages=[{"role": "user", "content": query}])
+    search_results = search_response.messages[-1]["content"] if search_response.messages else "No search results."
+
+    trustpilot_analysis = get_trustpilot_analysis(query)
+    twitter_sentiment = fetch_twitter_sentiment(query)
+
+    combined_results = f"{search_results}\n\nTrustPilot Analysis:\n{trustpilot_analysis}\n\nTwitter Sentiment Analysis:\n{twitter_sentiment}"
+
+    score_prompt = f"Generate a score for {query} based on the following information:\n\n{combined_results}"
+    score_response = client.run(score_agent, messages=[{"role": "user", "content": score_prompt}])
+    score_result = score_response.messages[-1]["content"] if score_response.messages else "No score available."
+
+    return query, search_results, trustpilot_analysis, twitter_sentiment, score_result
+
+def get_trustpilot_analysis(firm_name):
+    trustpilot_prompt = f"Find and analyze TrustPilot reviews for {firm_name}"
+    trustpilot_response = client.run(trustpilot_review_agent, messages=[{"role": "user", "content": trustpilot_prompt}])
+    return trustpilot_response.messages[-1]["content"] if trustpilot_response.messages else "No TrustPilot analysis available."
+
+def format_report_card(query):
+    firm_name, search_results, trustpilot_analysis, twitter_sentiment, score_result = search_prop_firms(query)
+
+    report_card = {
+        "firm_name": firm_name,
+        "overall_score": score_result,
+        "firm_info": search_results,
+        "trustpilot_analysis": trustpilot_analysis,
+        "twitter_sentiment": twitter_sentiment
+    }
+
+    return json.dumps(report_card)
+
+def run_gradio_interface():
+    with gr.Blocks(theme=gr.themes.Soft()) as demo:
+        gr.Markdown("# Proprietary Trading Firm Analysis")
+
+        with gr.Row():
+            with gr.Column(scale=2):
+                query_input = gr.Textbox(label="Enter firm name", lines=2)
+                search_button = gr.Button("Generate Report", variant="primary")
+            with gr.Column(scale=1):
+                gr.Markdown("### Example Queries")
+                gr.Examples(
+                    examples=["FTMO", "MyForexFunds", "The5ers"],
+                    inputs=query_input
+                )
+
+        output = gr.JSON(label="Report Card")
+
+        search_button.click(format_report_card, inputs=[query_input], outputs=[output])
+
+    demo.launch(share=True)
+
+if __name__ == "__main__":
+    run_gradio_interface()
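For reference, a minimal sketch of how the report-card pipeline above can be driven without the Gradio UI, assuming the same `.env` keys (`OPENAI_API_KEY`, `EXA_API_KEY`, `bearer_token`) are configured:

```python
# Minimal sketch: call the report-card pipeline from company_info_search.py directly.
import json

from company_info_search import format_report_card

report_json = format_report_card("FTMO")   # firm name taken from the example queries above
report = json.loads(report_json)           # format_report_card returns a JSON string
print(report["overall_score"])             # text produced by the Score Generator agent
```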
multi_agent_swarm.py
ADDED
@@ -0,0 +1,150 @@
+from swarm import Swarm, Agent
+from swarm.repl import run_demo_loop
+import os
+from dotenv import load_dotenv
+from openai import OpenAI
+from rich.console import Console
+from rich.panel import Panel
+from rich.text import Text
+from exa_py import Exa
+import json
+
+load_dotenv()  # This loads the variables from .env
+
+# Initialize the OpenAI client
+openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+# Initialize the Swarm client with the OpenAI client
+client = Swarm(client=openai_client)
+
+# Initialize the Exa client
+exa_client = Exa(api_key=os.getenv("EXA_API_KEY"))
+
+def transfer_to_executor(enhanced_prompt):
+    print(f"\nEnhancer Agent output:\n{enhanced_prompt}")
+    return executor_agent, {"prompt": enhanced_prompt}
+
+def transfer_to_planner(executed_response):
+    print(f"\nExecutor Agent output:\n{executed_response}")
+    return planner_agent, {"response": executed_response}
+
+def transfer_to_checker(executed_response):
+    print(f"\nExecutor Agent output:\n{executed_response}")
+    return checker_agent, {"response": executed_response}
+
+def transfer_to_enhancer(checked_response):
+    print(f"\nChecker Agent output:\n{checked_response}")
+    return enhancer_agent, {"response": checked_response}
+
+def search_internet(query, num_results=5):
+    # Use search_and_contents (as in company_info_search.py) so each result carries text.
+    results = exa_client.search_and_contents(query, num_results=num_results, text=True)
+    formatted_results = []
+    for result in results.results:
+        formatted_results.append(f"Title: {result.title}\nURL: {result.url}\nSnippet: {result.text[:300]}\n")
+    return "\n".join(formatted_results)
+
+def transfer_to_internet_search(query):
+    print(f"\nTransferring to Internet Search Agent with query:\n{query}")
+    return internet_search_agent, {"query": query}
+
+enhancer_agent = Agent(
+    name="Enhancer",
+    instructions="""You are an agent that enhances user prompts. Your task is to:
+    1. Reword the user's input to be more clear, specific, and thorough.
+    2. Determine if the query requires internet search or not.
+    3. If internet search is required, call the transfer_to_internet_search function with the enhanced query.
+    4. If not, return the enhanced prompt with the following instructions prepended:
+
+    [Instructions for the next agent:
+    1. Begin by enclosing all thoughts within <thinking> tags, exploring multiple angles and approaches.
+    2. Break down the solution into clear steps within <step> tags.
+    3. Use <count> tags after each step to show the remaining budget. Stop when reaching 0.
+    4. Continuously adjust your reasoning based on intermediate results and reflections.
+    5. Regularly evaluate progress using <reflection> tags.
+    6. Synthesize the final answer within <answer> tags, providing a clear, concise summary.
+    7. Conclude with a final reflection on the overall solution.]
+
+    Ensure these instructions are clearly separated from the enhanced user prompt.""",
+    functions=[transfer_to_executor, transfer_to_internet_search],
+)
+
+clarifier_agent = Agent(
+    name="Clarifier",
+    instructions="You are an agent that clarifies and enhances user prompts. Reword the user's input to be more clear, specific, and thorough. Ask for additional information if needed.",
+    functions=[transfer_to_executor],
+)
+
+executor_agent = Agent(
+    name="Executor",
+    instructions="You are an agent that executes enhanced prompts. Provide a detailed, well-thought-out response to the enhanced question or request.",
+    functions=[transfer_to_planner],
+)
+
+checker_agent = Agent(
+    name="Checker",
+    instructions="You are an agent that checks responses for correctness. If correct, approve it. If not, edit and improve it. Always provide your reasoning.",
+    functions=[transfer_to_enhancer],
+)
+
+planner_agent = Agent(
+    name="Planner",
+    instructions="You are an agent that creates action plans based on the user's request and the executor's response. Provide a step-by-step plan with resources and instructions for the user to follow up on their request.",
+    functions=[],
+)
+
+internet_search_agent = Agent(
+    name="Internet Search",
+    instructions="You are an agent that searches the internet for relevant information. Use the search_internet function to find information, then summarize and present the findings. Always include the source URLs in your response.",
+    functions=[search_internet, transfer_to_executor],
+)
+
+if __name__ == "__main__":
+    console = Console()
+    console.print(Panel("Starting Multi-Agent Swarm. Type 'exit' to quit.", style="bold green"))
+
+    while True:
+        user_input = console.input("\n[bold cyan]User:[/bold cyan] ")
+        if user_input.lower() == 'exit':
+            break
+        console.print(Panel(f"[bold cyan]User input:[/bold cyan]\n{user_input}", border_style="cyan"))
+
+        # Enhancer Agent
+        enhanced_response = client.run(enhancer_agent, messages=[{"role": "user", "content": user_input}])
+        if enhanced_response.messages:
+            last_message = enhanced_response.messages[-1]
+            function_call = last_message.get("function_call")
+            if function_call and isinstance(function_call, dict) and function_call.get("name") == "transfer_to_internet_search":
+                # Internet Search Agent
+                search_query = function_call.get("arguments", {}).get("query", user_input)
+                search_response = client.run(internet_search_agent, messages=[{"role": "user", "content": search_query}])
+                search_results = search_response.messages[-1]["content"] if search_response.messages else "No search results."
+                console.print(Panel(f"[bold blue]Internet Search Results:[/bold blue]\n{search_results}", border_style="blue"))
+
+                # Pass search results to Executor Agent
+                executed_response = client.run(executor_agent, messages=[
+                    {"role": "user", "content": user_input},
+                    {"role": "assistant", "content": f"Based on the internet search, here are the results:\n\n{search_results}\n\nPlease provide a detailed response to the user's query using this information."}
+                ])
+            else:
+                enhanced_prompt = last_message.get("content", user_input)
+                console.print(Panel(f"[bold green]Enhanced Prompt:[/bold green]\n{enhanced_prompt}", border_style="green"))
+
+                # Executor Agent
+                executed_response = client.run(executor_agent, messages=[{"role": "user", "content": enhanced_prompt}])
+        else:
+            console.print(Panel("[bold red]Error: Enhancer Agent returned no response.[/bold red]", border_style="red"))
+            continue
+
+        executed_output = executed_response.messages[-1]["content"]
+        console.print(Panel(f"[bold yellow]Executor Response:[/bold yellow]\n{executed_output}", border_style="yellow"))
+
+        # Planner Agent
+        plan_response = client.run(planner_agent, messages=[
+            {"role": "user", "content": user_input},
+            {"role": "assistant", "content": executed_output},
+            {"role": "user", "content": "Based on this information, create an action plan with resources and instructions."}
+        ])
+        action_plan = plan_response.messages[-1]["content"]
+        console.print(Panel(f"[bold magenta]Action Plan:[/bold magenta]\n{action_plan}", border_style="magenta"))
+
+        console.print("\n[bold blue]Process Complete[/bold blue]")
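For context, the openai Swarm framework switches the active agent when a tool function returns another `Agent`; that is the convention the `transfer_to_*` helpers above are built around. A minimal sketch of the pattern, with hypothetical agents that are not part of this commit:

```python
# Hedged sketch of a Swarm handoff: a tool function returning an Agent switches the active agent.
from swarm import Swarm, Agent

def transfer_to_spanish_agent():
    """Hand the conversation to the Spanish-speaking agent."""
    return spanish_agent

english_agent = Agent(
    name="English Agent",
    instructions="You only speak English. Hand off if the user writes in Spanish.",
    functions=[transfer_to_spanish_agent],
)
spanish_agent = Agent(name="Spanish Agent", instructions="You only speak Spanish.")

client = Swarm()  # uses OPENAI_API_KEY from the environment
response = client.run(agent=english_agent, messages=[{"role": "user", "content": "Hola"}])
print(response.agent.name)               # expected to be "Spanish Agent" after the handoff
print(response.messages[-1]["content"])  # the Spanish agent's reply
```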
readme.md
ADDED
@@ -0,0 +1,43 @@
+---
+title: multi-agent
+app_file: company_info_search.py
+sdk: gradio
+sdk_version: 5.1.0
+---
+# Multi-Agent Swarm
+
+This example demonstrates a multi-agent swarm built around an Enhancer, an Executor, and a Checker, with supporting Planner and Internet Search agents. The agents work together to process user queries and provide well-thought-out, verified responses. The Space itself launches `company_info_search.py` (see `app_file` above), a Gradio report-card app built on the same Swarm setup.
+
+## Setup
+
+To run this example:
+
+1. Ensure you have installed the Swarm framework as described in the main README.
+2. Set up your `.env` file in the root directory with the necessary API key:
+   ```
+   OPENAI_API_KEY=your_openai_api_key_here
+   ```
+3. Navigate to this directory:
+   ```
+   cd examples/multi_agent_swarm
+   ```
+4. Run the example:
+   ```
+   python multi_agent_swarm.py
+   ```
+
+## Agents
+
+1. **Enhancer Agent**: Enhances user prompts by adding instructions for reasoning steps and chain of thought.
+2. **Executor Agent**: Executes the enhanced prompts and provides detailed, well-thought-out responses.
+3. **Checker Agent**: Checks responses for correctness, approves correct ones, and edits/improves incorrect ones.
+
+## Process Flow
+
+1. User input is received by the Enhancer Agent.
+2. The Enhancer Agent rewrites the prompt and, when the query needs fresh information, hands it to the Internet Search Agent.
+3. The Executor Agent processes the enhanced prompt (and any search results) and produces a detailed response.
+4. The Planner Agent turns that response into a step-by-step action plan with resources and instructions. (The Checker Agent is defined for verification but is not wired into the current loop.)
+
+The script imports the `run_demo_loop` helper, but drives its own interactive session with `client.run` and Rich console panels.
+
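Note that besides `OPENAI_API_KEY`, the scripts in this commit also read `EXA_API_KEY` and, for the Twitter lookup in `company_info_search.py`, `bearer_token`. A quick sanity check using the same variable names the code expects:

```python
# Verify that the environment variables the two scripts read are actually set.
import os

from dotenv import load_dotenv

load_dotenv()
for var in ("OPENAI_API_KEY", "EXA_API_KEY", "bearer_token"):
    print(var, "set" if os.getenv(var) else "MISSING")
```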
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+openai
+exa-py
+python-dotenv
+rich
+flask
+flask-cors
+gunicorn
+# Also imported by company_info_search.py and multi_agent_swarm.py (the Swarm package is not on PyPI as "swarm"):
+gradio
+tweepy
+git+https://github.com/openai/swarm.git
wsgi.py
ADDED
@@ -0,0 +1,5 @@
+# Note: company_info_search.py does not define an `app` object, so this import fails as written;
+# the Space runs company_info_search.py directly (see app_file in readme.md), leaving this module unused.
+from company_info_search import app
+
+if __name__ == "__main__":
+    app.run()