siddhartharya committed on
Commit
37e6cfd
·
verified ·
1 Parent(s): b6846ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -21
app.py CHANGED
@@ -2,6 +2,10 @@ import os
2
  import requests
3
  import gradio as gr
4
  from openai import OpenAI
 
 
 
 
5
 
6
  # Fetch API keys from environment variables
7
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -14,12 +18,16 @@ def fetch_linkedin_data(linkedin_url):
14
  headers = {'Authorization': f'Bearer {api_key}'}
15
  api_endpoint = 'https://nubela.co/proxycurl/api/v2/linkedin'
16
 
 
17
  response = requests.get(api_endpoint,
18
  params={'url': linkedin_url},
19
- headers=headers)
 
20
  if response.status_code == 200:
 
21
  return response.json()
22
  else:
 
23
  return {"error": f"Error fetching LinkedIn data: {response.text}"}
24
 
25
  # Function to fetch company information using Firecrawl API
@@ -39,10 +47,13 @@ def fetch_company_info(company_url):
39
  }
40
  }
41
 
42
- response = requests.post(api_endpoint, json=data, headers=headers)
 
43
  if response.status_code == 200:
 
44
  return response.json()
45
  else:
 
46
  return {"error": f"Error fetching company information: {response.text}"}
47
 
48
  # Function to structure the email using the "Start with Why" model
@@ -53,31 +64,31 @@ def structure_email(user_data, linkedin_info, company_info):
53
  structured_input = f"{why}\n\n{how}\n\n{what}"
54
  return structured_input
55
 
56
# Function to generate email content using Nvidia Nemotron LLM
def generate_email_content(api_key, prompt):
    """Stream a chat completion from the Nvidia Nemotron model.

    Args:
        api_key: API key for the Nvidia integrate endpoint.
        prompt: Fully assembled prompt text sent as a single user message.

    Returns:
        The complete generated text, assembled from the streamed chunks.
    """
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=api_key
    )

    stream = client.chat.completions.create(
        model="nvidia/llama-3.1-nemotron-70b-instruct",
        messages=[
            {"role": "user", "content": prompt}
        ],
        temperature=0.5,
        top_p=1,
        max_tokens=1024,
        stream=True
    )

    # Assemble the streamed deltas; chunks may carry a None content payload
    # (e.g. the final chunk), so skip those before joining.
    pieces = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            pieces.append(delta)

    return "".join(pieces)
81
 
82
  # Function to validate the generated email for professional tone and completeness
83
  def validate_email(email_content):
 
2
  import requests
3
  import gradio as gr
4
  from openai import OpenAI
5
+ import logging
6
+
7
+ # Configure logging
8
+ logging.basicConfig(level=logging.INFO)
9
 
10
  # Fetch API keys from environment variables
11
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
18
  headers = {'Authorization': f'Bearer {api_key}'}
19
  api_endpoint = 'https://nubela.co/proxycurl/api/v2/linkedin'
20
 
21
+ logging.info("Fetching LinkedIn data...")
22
  response = requests.get(api_endpoint,
23
  params={'url': linkedin_url},
24
+ headers=headers,
25
+ timeout=10) # Adding a timeout for safety
26
  if response.status_code == 200:
27
+ logging.info("LinkedIn data fetched successfully.")
28
  return response.json()
29
  else:
30
+ logging.error(f"Error fetching LinkedIn data: {response.text}")
31
  return {"error": f"Error fetching LinkedIn data: {response.text}"}
32
 
33
  # Function to fetch company information using Firecrawl API
 
47
  }
48
  }
49
 
50
+ logging.info("Fetching company information...")
51
+ response = requests.post(api_endpoint, json=data, headers=headers, timeout=15) # Adding a timeout for safety
52
  if response.status_code == 200:
53
+ logging.info("Company information fetched successfully.")
54
  return response.json()
55
  else:
56
+ logging.error(f"Error fetching company information: {response.text}")
57
  return {"error": f"Error fetching company information: {response.text}"}
58
 
59
  # Function to structure the email using the "Start with Why" model
 
64
  structured_input = f"{why}\n\n{how}\n\n{what}"
65
  return structured_input
66
 
67
# Function to generate email content using Nvidia Nemotron LLM (non-streaming for simplicity)
def generate_email_content(api_key, prompt):
    """Generate email text from *prompt* via the Nvidia Nemotron model.

    Args:
        api_key: API key for the Nvidia integrate endpoint.
        prompt: Fully assembled prompt text sent as a single user message.

    Returns:
        The generated email content string, or the fixed string
        "Error generating email content." if the request fails.
    """
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=api_key
    )

    logging.info("Generating email content...")
    try:
        response = client.chat.completions.create(
            model="nvidia/llama-3.1-nemotron-70b-instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
            top_p=1,
            max_tokens=1024,
            stream=False  # Disable streaming for simplicity
        )
        # BUG FIX: the openai>=1.0 client (imported via `from openai import
        # OpenAI`) returns a pydantic model object, not a dict, so the old
        # response['choices'][0]['message']['content'] raised TypeError on
        # every call. Attribute access is the correct v1 API.
        email_content = response.choices[0].message.content
        logging.info("Email content generated successfully.")
        return email_content
    except Exception as e:
        logging.error(f"Error generating email content: {e}")
        return "Error generating email content."
92
 
93
  # Function to validate the generated email for professional tone and completeness
94
  def validate_email(email_content):