Sobit committed on
Commit
bca15bb
·
verified ·
1 Parent(s): 10c1c0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -127
app.py CHANGED
@@ -9,174 +9,119 @@ import gradio as gr
9
 
10
 
11
 
12
- # Set up API keys
13
- litellm.api_key = os.getenv('GOOGLE_API_KEY')
14
- os.environ['SERPER_API_KEY'] = os.getenv('SERPER_API_KEY')
 
 
 
 
 
 
 
 
 
15
 
16
  # Define the LLM
17
  llm = "gemini/gemini-1.5-flash-exp-0827" # Your LLM model
18
 
19
  # Initialize the tool for internet searching capabilities
20
- tool = SerperDevTool()
21
-
22
- # Create the CV Analysis Agent
23
- cv_analysis_agent = Agent(
24
- role="CV Analyzer",
25
- goal='Analyze the given CV and extract key skills and experiences and make improvements if needed for portfolio creation.',
 
 
 
 
26
  verbose=True,
27
  memory=True,
28
  backstory=(
29
- "You are an expert CV Analyzer with a keen eye for detail. Your role is to meticulously examine the provided CV, "
30
- "identifying and extracting key skills, experiences, accomplishments, and areas for improvement. "
31
- "Your analysis should highlight strengths and suggest enhancements that would make the portfolio more competitive."
32
- ),
33
-
34
- tools=[tool],
35
  llm=llm,
36
  allow_delegation=True
37
  )
38
 
39
- # Create the Portfolio Generation Agent
40
- portfolio_generation_agent = Agent(
41
- role='Portfolio Generator',
42
- goal='Generate a professional HTML/CSS/JS responsive landing portfolio webpage based on {cv} analysis.',
43
  verbose=True,
44
  memory=True,
45
  backstory=(
46
- "As a Responsive Portfolio Generator, your expertise lies in creating visually appealing and user-friendly web pages. "
47
- "Based on the CV analysis, you will generate a professional HTML/CSS/JS portfolio. "
48
- "Ensure the design reflects the individual's strengths and experiences while incorporating effective functionality. "
49
- "Consider responsiveness, color schemes, and navigation for an optimal user experience."
50
  ),
51
  tools=[tool],
52
  llm=llm,
53
  allow_delegation=False
54
  )
55
 
56
- # Research task for CV analysis
57
- cv_analysis_task = Task(
58
  description=(
59
- "Analyze the provided {cv} in detail. Identify and summarize key skills, experiences, and notable accomplishments. "
60
- "Highlight educational background and suggest potential enhancements to improve the overall presentation and competitiveness of the CV."
 
61
  ),
62
- expected_output='A detailed summary of skills, experiences, accomplishments, and improvement suggestions formatted for a portfolio.',
63
  tools=[tool],
64
- agent=cv_analysis_agent,
65
  )
66
 
67
-
68
- # Writing task for portfolio generation with enhanced UI requirements
69
-
70
- portfolio_task = Task(
71
  description=(
72
- "Generate a responsive HTML/CSS portfolio webpage based on the given CV analysis. "
73
- "Include a navbar with the individual's name, and sections for skills, projects, experiences, certifications, and contact information. "
74
- "Ensure the layout is clean and visually appealing with a light/dark theme toggle option. "
75
- "Embed CSS/JS directly into the HTML for easy deployment, and optimize for both desktop and mobile viewing."
76
  ),
77
- expected_output='A complete and responsive HTML document ready for deployment, showcasing the individual’s strengths.',
78
  tools=[tool],
79
- agent=portfolio_generation_agent,
80
  async_execution=True,
 
81
  )
82
 
83
- # Function to read CV from PDF or DOCX file
84
- def read_cv_file(file_path):
85
- ext = os.path.splitext(file_path)[1].lower()
86
- cv_content = ""
87
-
88
- if ext == '.pdf':
89
- with pdfplumber.open(file_path) as pdf:
90
- for page in pdf.pages:
91
- cv_content += page.extract_text()
92
- elif ext == '.docx':
93
- doc = Document(file_path)
94
- for para in doc.paragraphs:
95
- cv_content += para.text + "\n"
96
- else:
97
- raise ValueError("Unsupported file format. Please use .pdf or .docx.")
98
-
99
- return cv_content.strip()
100
-
101
  # Create a Crew for processing
102
  crew = Crew(
103
- agents=[cv_analysis_agent, portfolio_generation_agent],
104
- tasks=[cv_analysis_task, portfolio_task],
105
  process=Process.sequential,
106
  )
107
 
108
-
109
-
110
- # Function to process CV and generate portfolio
111
- def process_cv(file):
112
  try:
113
- cv_file_content = read_cv_file(file.name)
114
- result = crew.kickoff(inputs={'cv': cv_file_content})
115
-
116
- # Print the entire result object to explore its contents (for debugging)
117
- print(result)
118
-
119
- # Convert the result to string
120
- html_output = str(result)
121
-
122
- # Use replace to remove '''html''' and ''' from the output
123
- clean_html_output = html_output.replace("```html", '').replace("```", '').strip()
124
-
125
- return clean_html_output # Return the cleaned HTML
126
  except Exception as e:
127
- return f"Error: {e}"
128
-
129
-
130
-
131
- def save_html_to_file(html_content):
132
- output_file_path = "Portfolio_generated_by_FiftyBit.html"
133
- with open(output_file_path, "w") as f:
134
- f.write(html_content)
135
- return output_file_path
136
-
137
-
138
- import html
139
-
140
- def upload_file(filepath):
141
- name = Path(filepath).name
142
- html_content = process_cv(filepath) # Get HTML content from the CV
143
-
144
- # Clean the HTML content and escape it for proper iframe embedding
145
- clean_html_output = html_content.replace("```html", '').replace("```", '').strip()
146
- escaped_html_content = html.escape(clean_html_output) # Escape HTML content
147
-
148
- # Debugging print to check the escaped HTML content
149
- #print("Escaped HTML content:", escaped_html_content)
150
-
151
- # Save the cleaned HTML content to a file (if you still want this feature)
152
- file_path = save_html_to_file(clean_html_output)
153
-
154
- # Return a full HTML string with embedded iframe for preview
155
- iframe_html = f"""
156
- <iframe srcdoc="{escaped_html_content}" style="width:100%; height:1000px; border:none; overflow:auto;"></iframe>
157
- """
158
- return iframe_html, gr.UploadButton(visible=False), gr.DownloadButton(label=f"Download Code", value=file_path, visible=True)
159
-
160
- def download_file():
161
- return [gr.UploadButton(label=f"Regenerate", visible=True), gr.DownloadButton(visible=False)]
162
-
163
- # Gradio App
164
- with gr.Blocks() as demo:
165
- gr.Markdown("<center><h1> CV-2-Portfolio Site Generator</center></h1>")
166
- gr.Markdown("<center><h2>Upload your CV in PDF or DOCX format for analysis and portfolio webpage generation.</center></h2>")
167
 
168
- u = gr.UploadButton("Upload CV (.pdf or .docx)", file_count="single")
169
- d = gr.DownloadButton("Download Portfolio", visible=False)
170
 
171
- # Use gr.HTML with larger iframe size to display the full preview
172
- output_preview = gr.HTML(
173
- value="<div style='width:100%; height:1000px; border:1px solid #ccc; text-align:center;'>Upload a file to preview the generated portfolio</div>"
174
- )
 
 
 
 
 
175
 
176
- # Connect the upload button to the upload_file function and update the output preview
177
- u.upload(upload_file, u, [output_preview, u, d])
178
 
179
- # Handle download button click
180
- d.click(download_file, None, [u, d])
181
 
182
- demo.launch(debug=True)
 
 
 
9
 
10
 
11
 
12
# Error handling for API keys.
try:
    # Read both keys into locals first: assigning a missing (None) key
    # directly into os.environ raises a confusing TypeError ("str expected,
    # not NoneType") before the intended ValueError check below could run.
    google_key = os.getenv('GOOGLE_API_KEY')
    serper_key = os.getenv('SERPER_API_KEY')

    if not google_key or not serper_key:
        raise ValueError("API keys are missing. Please ensure both Google API Key and SERPER API Key are set.")

    # Set up API keys only after both were validated.
    litellm.api_key = google_key
    os.environ['SERPER_API_KEY'] = serper_key
except Exception as e:
    print(f"Error setting up API keys: {e}")
    exit()
24
 
25
# Define the LLM.
# Model identifier string consumed by the crewai agents below (litellm format).
llm = "gemini/gemini-1.5-flash-exp-0827"  # Your LLM model

# Initialize the tool for internet searching capabilities.
# Points Serper at the Google Scholar endpoint and caps results at 10,
# so agent searches return scholarly publications rather than web pages.
try:
    tool = SerperDevTool(search_url="https://google.serper.dev/scholar", n_results=10)
except Exception as e:
    # Without the search tool the agents cannot do their job; abort early,
    # matching the API-key error handling above.
    print(f"Error initializing search tool: {e}")
    exit()
34
+
35
# Research agent: finds and analyzes recent publications on the given topic.
# The {topic} placeholders are filled in by crew.kickoff(inputs={'topic': ...}).
research_agent = Agent(
    role="Research Assistant",
    goal='Discover and retrieve the latest groundbreaking papers and publications on {topic}.',
    verbose=True,
    memory=True,
    backstory=(
        "You are an expert researcher who specializes in locating the most recent and relevant research papers. "
        "You focus on analyzing research from credible sources like Google Scholar, ensuring they are closely aligned with the {topic}. "
        "Your insights help refine ongoing research by identifying gaps and suggesting areas for improvement."
    ),
    # NOTE(review): no tools= passed here, so this agent cannot call the
    # Serper search tool itself even though its goal is to discover papers;
    # the tool is attached to research_task instead — confirm intentional.
    llm=llm,
    # May delegate work to the other agent in the crew.
    allow_delegation=True
)
49
 
50
# Writer agent: turns the research findings into an organized key-points report.
writer_agent = Agent(
    role="Research Key Points Writer",
    goal="Extract and present the key points of relevant research papers, including publication links.",
    verbose=True,
    memory=True,
    backstory=(
        "As a skilled research writer, your task is to extract key information such as objectives, methodologies, findings, and future improvements. "
        "You will list the publication links in an organized manner."
    ),
    # Search tool available so the writer can look up publication links.
    tools=[tool],
    llm=llm,
    # Final writing step: no further delegation allowed.
    allow_delegation=False
)
64
 
65
# Research task: first stage of the sequential pipeline — gather and
# critique relevant papers on {topic}. Its output is consumed by writer_task.
research_task = Task(
    description=(
        "Identify all relevant research papers on {topic}. "
        "For each paper, extract key points such as the main objectives, methodology, findings, and any significant flaws in the study. "
        "Highlight gaps in the research and suggest possible improvements."
    ),
    expected_output='A structured list of key points from relevant papers, including strengths, weaknesses, and improvement suggestions.',
    tools=[tool],
    agent=research_agent,
)
76
 
77
# Writer task: second stage — compose the markdown report and persist it.
writer_task = Task(
    description=(
        "Compose a report highlighting the key points from {topic}-related publications. "
        "The report should include the main objectives, methodologies, and findings of each paper, along with a link to the publication. "
        "Ensure that the information is accurate, clear and well-organized."
    ),
    expected_output='A markdown file (.md) containing key points and publication links for each paper.',
    tools=[tool],
    agent=writer_agent,
    # NOTE(review): async_execution on the last task of a sequential crew —
    # confirm crew.kickoff() still waits for the output file to be written
    # before generate_report() tries to read it.
    async_execution=True,
    # The report is written to this path; generate_report() reads the same file.
    output_file='key_points_report.md'
)
90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
# Create a Crew for processing.
# Process.sequential runs the tasks in the listed order:
# research_task first, then writer_task.
crew = Crew(
    agents=[research_agent, writer_agent],
    tasks=[research_task, writer_task],
    process=Process.sequential,
)
97
 
98
# Define a function that will take the research topic as input and return the markdown output.
def generate_report(topic):
    """Run the crew on *topic* and return the generated markdown report.

    Args:
        topic: Research topic/keywords, interpolated into the agents'
            {topic} placeholders.

    Returns:
        The markdown report text, or an ``"Error during processing: ..."``
        string if anything fails (the Gradio UI renders either one).
    """
    try:
        # Kickoff the Crew process with the provided topic.
        result = crew.kickoff(inputs={'topic': topic})
        try:
            # writer_task persists its report here (output_file above);
            # read it back explicitly as UTF-8.
            with open('key_points_report.md', 'r', encoding='utf-8') as file:
                return file.read()
        except FileNotFoundError:
            # The file was not written (e.g. the writer task produced no
            # output) — fall back to the in-memory crew result instead of
            # reporting a confusing file error.
            return str(result)
    except Exception as e:
        return f"Error during processing: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
 
 
110
 
111
# Gradio Interface
def gradio_interface():
    """Build the Blocks UI for the report generator and launch it.

    A single column holds the topic textbox, the markdown output area,
    and the submit button that triggers generate_report().
    """
    app = gr.Blocks()
    with app:
        gr.Markdown("<center><h1>AI Research Assistant Agent-Key Points Extractor</h1></center>")
        with gr.Column():
            topic_box = gr.Textbox(lines=2, placeholder="Enter your research topic/keywords", label="Research Topic/Keywords")
            report_view = gr.Markdown(label="Key Points Output")
            run_button = gr.Button("Generate Report")

        # Clicking the button feeds the textbox value to generate_report
        # and renders the returned markdown.
        run_button.click(generate_report, inputs=topic_box, outputs=report_view)

    app.launch(debug=True)
 
124
 
125
# Run the Gradio interface.
# Entry-point guard: launch the UI only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    gradio_interface()