Shreyask09 commited on
Commit
4d559b9
·
1 Parent(s): 10dfa9d
app.py CHANGED
@@ -1,64 +1,126 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
27
 
28
- response = ""
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
41
 
 
 
 
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
 
 
 
 
 
 
 
 
 
 
60
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
 
 
 
 
 
 
62
 
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
1
  import gradio as gr
2
+ from email_generator.main import loop_email_workflow, EMAIL_EVALUATOR_PROMPT, EMAIL_GENERATOR_PROMPT
3
+ import json
4
 
5
+ # Function to generate the email
6
def generate_email_workflow(persona_json: str, campaign_json: str, sender_json: str, max_attempts: int, openai_api_key: str):
    """
    Generate a complete email with persona, campaign, and sender details.

    Args:
        persona_json (str): A JSON string representing the persona.
        campaign_json (str): A JSON string representing the campaign details.
        sender_json (str): A JSON string representing the sender details.
        max_attempts (int): Maximum number of attempts for generating a valid email.
        openai_api_key (str): The API key for OpenAI; blank selects the HuggingFace path.

    Returns:
        str: The complete generated email or an error message.
    """
    try:
        # Parse all three JSON inputs up front so a malformed payload is
        # reported before any model call is made.
        persona = json.loads(persona_json)
        campaign = json.loads(campaign_json)
        sender = json.loads(sender_json)

        # An empty API key means we fall back to the HuggingFace model.
        use_huggingface = not bool(openai_api_key)
        model_used = "HuggingFace (Zephyr-7B)" if use_huggingface else "OpenAI (gpt-3.5-turbo)"

        # Run the generate -> evaluate -> retry workflow.
        result = loop_email_workflow(
            persona=persona,
            campaign=campaign,
            sender_data=sender,
            evaluator_prompt=EMAIL_EVALUATOR_PROMPT,
            generator_prompt=EMAIL_GENERATOR_PROMPT,
            max_tries=max_attempts,
            use_huggingface=use_huggingface,
            openai_api_key=openai_api_key if not use_huggingface else None,
        )

        # .get() guards against a result dict missing the key entirely
        # (the original indexed it directly and could raise KeyError).
        generated_email = result.get("final_email")
        if not generated_email:
            return f"Failed to generate a valid email after {max_attempts} attempts. Feedback: {result.get('message', 'No additional information.')}\n\nModel Used: {model_used}"

        return generated_email

    except json.JSONDecodeError:
        return "Invalid JSON format. Please ensure all inputs are valid JSON."
    except Exception as e:
        return f"Error: {e}"
54
 
55
+
56
# ---- Gradio UI definition ----
# Components are created first, then placed inside the Blocks layout via
# .render() so the input column and output column stay side by side.
persona_box = gr.Textbox(
    label="Enter Persona (JSON format)",
    lines=10,
    value='{"name": "John", "city": "New York", "hobbies": "Reading"}',
    placeholder='{"name": "John", "city": "New York", "hobbies": "Reading"}'
)
campaign_box = gr.Textbox(
    label="Enter Campaign Details (JSON format)",
    lines=10,
    value='{"subject_line": "Discover Our New Product!", "product": "Backpacks", "discount": "20%", "validity": "Until January 31, 2025"}',
    placeholder='{"subject_line": "Discover Our New Product!", "product": "Backpacks", "discount": "20%", "validity": "Until January 31, 2025"}'
)
sender_box = gr.Textbox(
    label="Enter Sender Details (JSON format)",
    lines=5,
    value='{"name": "Jane Doe", "company": "Outdoor Gear Co."}',
    placeholder='{"name": "Jane Doe", "company": "Outdoor Gear Co.", "cta_text": "Shop Now", "cta_link": "https://example.com"}'
)
attempts_slider = gr.Slider(
    label="Max Attempts",
    minimum=1,
    maximum=10,
    step=1,
    value=3,
    interactive=True
)
api_key_box = gr.Textbox(
    label="Enter OpenAI API Key (Leave blank to use HuggingFace Zephyr-7B Beta)",
    type="password",
    placeholder="sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
)
output_box = gr.Textbox(
    label="Generated Email",
    lines=15,
    interactive=False,
)

# ---- Layout ----
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Personalized Email Generator
        Generate a personalized email based on user persona and campaign details.
        Provide the inputs in JSON format and specify the maximum number of attempts for generation.

        ### Available Models:
        - **OpenAI (gpt-3.5-turbo)**: A highly advanced language model known for its accuracy and contextual understanding, ideal for generating professional and creative emails.
        - **HuggingFace Zephyr-7B Beta**: An open-source model optimized for text generation tasks, offering a cost-effective alternative to proprietary APIs.
        """
    )
    with gr.Row():
        with gr.Column():
            persona_box.render()
            campaign_box.render()
            sender_box.render()
            attempts_slider.render()
            api_key_box.render()
        with gr.Column():
            output_box.render()

    submit_btn = gr.Button("Generate Email")
    submit_btn.click(
        fn=generate_email_workflow,
        inputs=[persona_box, campaign_box, sender_box, attempts_slider, api_key_box],
        outputs=output_box,
    )

# Launch the app only when run as a script (Spaces imports this module).
if __name__ == "__main__":
    demo.launch()
email_generator/__pycache__/main.cpython-312.pyc ADDED
Binary file (17.6 kB). View file
 
email_generator/main.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from typing import Literal
3
+ from pydantic import ValidationError
4
+ from rich.console import Console
5
+ from rich.logging import RichHandler
6
+ import logging
7
+ import re
8
+ from openai import OpenAI
9
+ import os
10
+ from dotenv import load_dotenv
11
+ from huggingface_hub import InferenceClient
12
+ from typing import List, Optional
13
+
14
+ # Load environment variables
15
+ load_dotenv()
16
+
17
def initialize_client(api_key=None):
    """Return an OpenAI client for *api_key*, or None when no key is given."""
    return OpenAI(api_key=api_key) if api_key else None
22
+
23
# Setup logging: route stdlib log records through a shared rich console so
# workflow output and log messages land in one consistently formatted stream.
console = Console()
logging.basicConfig(
    handlers=[RichHandler(console=console)],
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger("email_agent")
32
+
33
# Prompt template for the generator LLM. Placeholders {persona}, {campaign}
# and {sender} are filled by generate_email(); the model must answer with
# <thoughts>...</thoughts> and <email>...</email> blocks.
EMAIL_GENERATOR_PROMPT = """
Your goal is to write a personalized email for the user based on the provided persona, campaign, and sender details.
If there are feedback points from previous generations, you should reflect on them to improve your solution.

Persona:
{persona}

Campaign Details:
{campaign}

Sender Details:
{sender}

**Output Format Requirement**: The response must strictly adhere to the following format. Ensure that:
1. All opening tags have corresponding closing tags.
2. The content inside each tag is complete and relevant to the provided details.
3. Do not use placeholders such as `[Insert "Shop Now" button]`, `[Company Logo]`, or `[Unsubscribe link]`.

```
<thoughts>
[Include your understanding of the persona, campaign, sender details.]
</thoughts>

<email>
[Your email content here,without any placeholders or incomplete references.]
</email>
```
Important: The tags <thoughts> and <email> must always be properly closed.
"""

# NOTE(review): legacy evaluator prompt; nothing in this file references it.
# Kept to preserve the module's public surface — confirm before removing.
EMAIL_EVALUATOR_PROMPT1 = """
Evaluate the provided email content using the following criteria:
1. **Personalization Accuracy**: Does the email reflect the persona details and campaign details?
2. **Tone and Style**: Is the tone engaging and appropriate for the persona? Does it align with the persona's characteristics?
3. **Clarity and Readability**: Is the email easy to read, with clear and concise sentences? Does it avoid ambiguity and jargon?

**Instructions:**
- Only output "PASS" if all criteria are met with no room for improvement.
- If the email does not meet the criteria, output "NEEDS_IMPROVEMENT" or "FAIL", followed by specific feedback.

**Output Format:**
{{"evaluation": "<PASS | NEEDS_IMPROVEMENT | FAIL>", "feedback": "<Provide specific feedback explaining what needs to be improved and why.>"}}

Persona:
{persona}

Campaign Details:
{campaign}

Sender Details:
{sender}

Email Content:
{generated_content}
"""

# Active evaluator prompt. Doubled braces {{ }} survive str.format() so the
# model sees a literal JSON example matching evaluate_email's Evaluation schema.
EMAIL_EVALUATOR_PROMPT = """
Evaluate email against these criteria:
1. Personalization: Match with persona & campaign
2. Tone: Appropriate for persona
3. Clarity: Readable, concise language

Scoring:
- Personalization (0-10)
- Tone Alignment (0-10)
- Readability (0-10)

Output Format:
{{"evaluation": "<PASS | NEEDS_IMPROVEMENT | FAIL>","feedback": {{"personalization_score": 0,"tone_alignment_score": 0,"readability_score": 0,"improvements": ["Suggestion 1", "Suggestion 2"]}}}}

Persona: {persona}
Campaign: {campaign}
Sender: {sender}
Email: {generated_content}
"""
108
+
109
def JSON_llm(prompt: str, openai_api_key: str = None, use_huggingface: bool = False, schema: BaseModel = None) -> dict:
    """
    Call the LLM and validate its response against a pydantic schema.

    Args:
        prompt (str): The input prompt for the LLM.
        openai_api_key (str): OpenAI key; unset routes to HF/simulated paths.
        use_huggingface (bool): Route the call through the HuggingFace client.
        schema (BaseModel): A pydantic model class for validating the output.

    Returns:
        dict: The validated response from the LLM.

    Raises:
        ValueError: If no schema was supplied, or the response doesn't match it.
    """
    raw_response = llm_call(prompt, model="gpt-3.5-turbo", api_key=openai_api_key, use_huggingface=use_huggingface)

    # The original dereferenced `schema` unconditionally, so the default
    # schema=None crashed with AttributeError; fail with a clear error instead.
    if schema is None:
        raise ValueError("JSON_llm requires a pydantic schema to validate the response.")

    # Models frequently wrap the JSON payload in prose or code fences;
    # keep only the outermost {...} span before validating.
    json_match = re.search(r"\{.*\}", raw_response, re.DOTALL)
    candidate = json_match.group(0) if json_match else raw_response

    try:
        # Parse and validate the response against the schema.
        parsed_response = schema.parse_raw(candidate)
        return parsed_response.dict()
    except ValidationError as e:
        logger.error(f"Validation failed: {e}")
        logger.error(f"Raw response: {raw_response}")
        raise ValueError(f"Invalid response format: {raw_response}") from e
135
+
136
def extract_response_content(generated_text: str) -> str:
    """Return everything after the first "Response:" marker, or "" if absent."""
    marker = re.search(r"Response:\s*(.*)", generated_text, re.DOTALL)
    if marker is None:
        return ""
    return marker.group(1).strip()
140
+
141
+
142
+
143
def llm_call(prompt: str, model: str = "gpt-3.5-turbo", api_key: str = None, use_huggingface: bool = False) -> str:
    """
    Call an LLM backend (OpenAI, HuggingFace, or a simulated fallback).

    Args:
        prompt (str): User prompt sent as a single chat message.
        model (str): OpenAI model name (ignored on the HuggingFace path).
        api_key (str): OpenAI API key; selects the OpenAI path when set.
        use_huggingface (bool): Force the HuggingFace inference path.

    Returns:
        str: The model's complete response text.
    """
    messages = [{"role": "user", "content": prompt}]

    if api_key and not use_huggingface:
        console.print("Using OpenAI model.")
        client = initialize_client(api_key)
        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return response.choices[0].message.content

    elif use_huggingface:
        console.print("Using Hugging Face model.")
        hf_client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
        response = ""
        for chunk in hf_client.chat_completion(
            messages,
            max_tokens=900,
            stream=True,
            temperature=0.4,
            top_p=0.95,
        ):
            token = chunk.choices[0].delta.content
            # The final streamed delta can carry content=None; the original
            # raised TypeError on `response += None` — skip empty deltas.
            if token:
                response += token
        return response

    else:
        console.print("Using default simulated response.")
        # Simulated response matching the evaluation schema, used when neither
        # backend is configured (e.g. local testing).
        return '{"evaluation": "NEEDS_IMPROVEMENT", "feedback": "Simulated fallback response for testing purposes."}'
180
+
181
+
182
def extract_xml(text: str, tag: str) -> str:
    """
    Extracts the content of the specified XML tag from the given text.

    Note: this function was previously defined twice with identical bodies;
    the redundant shadowing duplicate has been removed.

    Args:
        text (str): The text containing the XML.
        tag (str): The XML tag to extract content from.

    Returns:
        str: The content of the first <tag>...</tag> pair, or an empty string
        if the tag is not found.
    """
    match = re.search(f'<{tag}>(.*?)</{tag}>', text, re.DOTALL)
    return match.group(1) if match else ""
210
+
211
def generate_email(persona: dict, campaign: dict, sender_data: dict, generator_prompt: str, context: str = "", openai_api_key: str = None, use_huggingface: bool = False) -> str:
    """
    Generate a personalized email based on persona, campaign details, and feedback.

    Args:
        persona (dict): Persona attributes (name, city, ...).
        campaign (dict): Campaign attributes (subject line, product, ...).
        sender_data (dict): Sender attributes (name, company, ...).
        generator_prompt (str): Template with {persona}/{campaign}/{sender} slots.
        context (str): Optional feedback from previous attempts.
        openai_api_key (str): OpenAI key; empty routes to HF/simulated paths.
        use_huggingface (bool): Route generation through HuggingFace.

    Returns:
        str: The raw LLM response (expected to contain <thoughts>/<email> tags).
        (The original annotated tuple[str, str], but a single string is returned.)
    """
    def _as_text(data: dict) -> str:
        # e.g. "purchase_history" -> "Purchase history: <value>"
        return "\n".join(f"{key.replace('_', ' ').capitalize()}: {value}" for key, value in data.items())

    full_prompt = generator_prompt.format(
        persona=_as_text(persona),
        campaign=_as_text(campaign),
        sender=_as_text(sender_data),
    )
    if context:
        full_prompt += f"\nFeedback: {context}"
    console.print("Generating email using LLM...")
    console.print(f"Prompt: {full_prompt}")
    response = llm_call(full_prompt, model="gpt-3.5-turbo", api_key=openai_api_key, use_huggingface=use_huggingface)
    console.print("Generated email response.")
    console.print("[bold green]Generated Email Output:[/bold green]")
    console.print(response)
    return response
227
+
228
def evaluate_email(persona: dict, campaign: dict, sender_data: dict, evaluator_prompt: str, generated_content: str, openai_api_key: str = None, use_huggingface: bool = False):
    """
    Evaluate if a generated email meets requirements.

    Args:
        persona (dict): Persona attributes.
        campaign (dict): Campaign attributes.
        sender_data (dict): Sender attributes.
        evaluator_prompt (str): Template with {persona}/{campaign}/{sender}/
            {generated_content} slots.
        generated_content (str): The email body to evaluate.
        openai_api_key (str): OpenAI key; empty routes to HF/simulated paths.
        use_huggingface (bool): Route evaluation through HuggingFace.

    Returns:
        tuple: (evaluation, feedback) — evaluation is "PASS" |
        "NEEDS_IMPROVEMENT" | "FAIL"; feedback is the evaluator's detail dict.

    Raises:
        ValueError: If any required input is missing or empty.
    """
    try:
        # Fail fast on missing inputs so the prompt is never built from
        # partial data.
        if not persona:
            raise ValueError("Persona is required")
        if not campaign:
            raise ValueError("Campaign is required")
        if not generated_content:
            raise ValueError("Generated content is required")
        if sender_data is None:
            raise ValueError("Sender data is required")

        # Dynamically build text representations of the input dicts.
        persona_text = "\n".join(f"{key.replace('_', ' ').capitalize()}: {value}" for key, value in persona.items())
        campaign_text = "\n".join(f"{key.replace('_', ' ').capitalize()}: {value}" for key, value in campaign.items())
        sender_text = "\n".join(f"{key.replace('_', ' ').capitalize()}: {value}" for key, value in sender_data.items())

        full_prompt = evaluator_prompt.format(
            persona=persona_text,
            campaign=campaign_text,
            sender=sender_text,
            generated_content=generated_content
        )
    except Exception as e:
        # Single structured log with traceback (replaces the original's
        # traceback.print_exc + logger.error + print triple-reporting),
        # then re-raise so callers see the original error.
        logger.exception(f"Error in evaluate_email: {e}")
        raise

    # Schema the evaluator's JSON answer must satisfy; pydantic copies the
    # default dict per instance, so the mutable default is safe here.
    class Evaluation(BaseModel):
        evaluation: Literal["PASS", "NEEDS_IMPROVEMENT", "FAIL"]
        feedback: Optional[dict] = {
            "personalization_score": 0,
            "tone_alignment_score": 0,
            "readability_score": 0,
            "improvements": []
        }

    console.print("Evaluating generated email...")
    response = JSON_llm(full_prompt, openai_api_key, use_huggingface, Evaluation)
    evaluation = response["evaluation"]
    feedback = response["feedback"]

    console.print(f"Evaluation result: {evaluation}")
    if feedback:
        console.print(f"Feedback: {feedback}")

    console.print("[bold yellow]Evaluation Feedback:[/bold yellow]")
    console.print(feedback)

    return evaluation, feedback
287
+
288
def loop_email_workflow(persona: dict, campaign: dict, sender_data: dict, evaluator_prompt: str, generator_prompt: str, max_tries: int = 5, openai_api_key: str = None, use_huggingface: bool = False) -> dict:
    """
    Keep generating and evaluating emails until the evaluator passes or max tries reached.

    Args:
        persona (dict): Persona attributes.
        campaign (dict): Campaign attributes.
        sender_data (dict): Sender attributes.
        evaluator_prompt (str): Template for the evaluation call.
        generator_prompt (str): Template for the generation call.
        max_tries (int): Maximum evaluate/regenerate iterations.
        openai_api_key (str): OpenAI key; empty routes to HF/simulated paths.
        use_huggingface (bool): Route calls through HuggingFace.

    Returns:
        dict: {"final_email", "llm_hits", "tokens_used", "cost"} on success;
        on failure "final_email" is None and a "message" key is included.

    Raises:
        ValueError: If persona, campaign, or sender data is missing.
    """
    memory = []  # Store previous responses, fed back as context on retries
    llm_hits = 0
    tokens_used = 0
    cost = 0

    console.print("Starting email generation workflow...")
    if not persona or not campaign or not sender_data:
        raise ValueError("Persona, campaign, and sender data are required for email generation.")

    response = generate_email(persona, campaign, sender_data, generator_prompt, openai_api_key=openai_api_key, use_huggingface=use_huggingface)
    llm_hits += 1
    tokens_used += len(response.split())  # Whitespace-split approximation of tokens
    memory.append(response)

    for attempt in range(max_tries):
        console.print(f"Attempt {attempt + 1} to generate a successful email.")
        try:
            email_content = extract_xml(response, "email")
            console.print(f"Email content: {email_content}")
            evaluation, feedback = evaluate_email(persona, campaign, sender_data, evaluator_prompt, email_content, openai_api_key=openai_api_key, use_huggingface=use_huggingface)
        except ValueError as e:
            # BUG FIX: rich.console.Console has no .error() method — the
            # original `console.error(...)` raised AttributeError here and
            # masked the real evaluation failure.
            logger.error(f"Evaluation failed: {e}")
            break

        llm_hits += 1
        tokens_used += len(str(feedback).split())

        if evaluation == "PASS":
            cost = tokens_used * 0.0001  # Example cost calculation
            console.print("Email generation completed successfully.")
            return {
                "final_email": email_content,
                "llm_hits": llm_hits,
                "tokens_used": tokens_used,
                "cost": cost,
            }

        # Feed all prior attempts plus the latest feedback into the next try.
        context = "\n".join([
            "Previous attempts:",
            *[f"- {m}" for m in memory],
            f"Feedback: {feedback}"
        ])
        response = generate_email(persona, campaign, sender_data, generator_prompt, context, openai_api_key=openai_api_key, use_huggingface=use_huggingface)
        llm_hits += 1
        tokens_used += len(response.split())
        memory.append(response)

    logger.warning("Max attempts reached without generating a successful email.")
    cost = tokens_used * 0.0001
    return {
        "final_email": None,
        "llm_hits": llm_hits,
        "tokens_used": tokens_used,
        "cost": cost,
        "message": "Max attempts reached without a PASS.",
    }
346
+
347
+
348
+ # Example user persona
349
def example():
    """Run the workflow once with hard-coded demo data and print the result."""
    # Demo persona
    demo_persona = {
        "name": "Alice Smith",
        "city": "San Francisco",
        "hobbies": "Hiking, Cooking",
        "purchase_history": "Outdoor Gear"
    }

    # Demo campaign
    demo_campaign = {
        "subject_line": "Discover Your Next Outdoor Adventure",
        "product": "New Hiking Backpacks",
        "discount": "20% off",
        "validity": "Until January 31st, 2025",
    }

    # Demo sender
    demo_sender = {
        "name": "John Doe",
        "email": "[email protected]"
    }

    # Run the full generate/evaluate loop against the demo data.
    outcome = loop_email_workflow(
        persona=demo_persona,
        campaign=demo_campaign,
        sender_data=demo_sender,
        evaluator_prompt=EMAIL_EVALUATOR_PROMPT,
        generator_prompt=EMAIL_GENERATOR_PROMPT,
        max_tries=5,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        use_huggingface=False
    )

    # Report success or failure to the console.
    if outcome["final_email"]:
        console.print("Final Email Generated Successfully:")
        console.print("[bold green]Final Email Content:[/bold green]")
        console.print(outcome["final_email"])
    else:
        logger.error("Failed to generate a passing email after maximum attempts.")
        console.print("[bold red]Workflow Result:[/bold red]")
        console.print(outcome)


if __name__ == "__main__":
    example()
email_generator/util.py ADDED
File without changes
requirements.txt CHANGED
@@ -1 +1,5 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
1
+ pydantic
2
+ rich
3
+ openai
4
+ huggingface_hub
5
+ python-dotenv