Spaces:
Sleeping
Sleeping
uploading files
Browse files- .env +2 -0
- app.py +99 -0
- components/__pycache__/manual_entry.cpython-312.pyc +0 -0
- components/__pycache__/resume_entry.cpython-312.pyc +0 -0
- components/manual_entry.py +66 -0
- components/resume_entry.py +83 -0
- generate_questions/__pycache__/questions_generator.cpython-312.pyc +0 -0
- generate_questions/questions_generator.py +32 -0
- main.py +39 -0
- prompts/__pycache__/prompts.cpython-312.pyc +0 -0
- prompts/prompts.py +19 -0
- requirements.txt +90 -0
- src/__pycache__/end_greets.cpython-312.pyc +0 -0
- src/__pycache__/evaluate.cpython-312.pyc +0 -0
- src/__pycache__/feedback.cpython-312.pyc +0 -0
- src/__pycache__/greetings.cpython-312.pyc +0 -0
- src/__pycache__/home_page.cpython-312.pyc +0 -0
- src/__pycache__/manual_entry.cpython-312.pyc +0 -0
- src/__pycache__/resume_parser.cpython-312.pyc +0 -0
- src/__pycache__/sentiment_analysis.cpython-312.pyc +0 -0
- src/evaluate.py +53 -0
- src/feedback.py +46 -0
- src/greetings.py +37 -0
- src/home_page.py +45 -0
- src/manual_entry.py +88 -0
- src/resume_parser.py +115 -0
.env
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
|
2 |
+
GOOGLE_API_KEY = REDACTED_ROTATE_THIS_KEY  # SECURITY: a live Google API key was committed here — revoke/rotate it immediately and add .env to .gitignore
|
app.py
ADDED
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
load_dotenv()
import re
import time
import os

# Configure Gemini from the environment (.env); the key is never hard-coded.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

model = genai.GenerativeModel("gemini-pro")

# Seed the chat with a short greeting exchange so later prompts have context.
chat = model.start_chat(
    history=[
        {"role": "user", "parts": "Hello"},
        {"role": "model", "parts": "Great to meet you. What would you like to know?"},
    ]
)


def generate_questions(chat, tech):
    """Generates 3-5 technical questions for a specific tech item, starting from easy to advanced.

    Args:
        chat: An active Gemini chat session.
        tech: Name of the technology to generate questions for.

    Returns:
        str: The generated question text, or a fallback message when the
        model returns an empty response.
    """
    prompt = (
        f"You are a technical interviewer. Create 3 to 5 technical interview questions for the technology: {tech}. "
        "Don't mention level of question and just provide the questions directly."
    )
    open_ended_response = chat.send_message(prompt, stream=True)
    open_ended_response.resolve()  # block until the streamed response is complete
    return open_ended_response.text if open_ended_response.text else "Unable to generate open-ended questions."


# Streamlit UI
st.title("TalentScout - Hiring Assistant Chatbot")
st.sidebar.title("Candidate Details")

# Collect Candidate Information
with st.sidebar.form("candidate_form"):
    name = st.text_input("Full Name")
    email = st.text_input("Email Address")
    phone = st.text_input("Phone Number")
    experience = st.number_input("Years of Experience", min_value=0, step=1)
    position = st.text_input("Desired Position")
    location = st.text_input("Current Location")
    tech_stack = st.text_area("Tech Stack (comma-separated)").split(",")
    submit = st.form_submit_button("Submit")

if submit:
    st.success("Candidate information submitted successfully!")

    # Display Candidate Information
    st.write("### Candidate Information")
    st.write(f"**Name:** {name}")
    st.write(f"**Email:** {email}")
    st.write(f"**Phone:** {phone}")
    st.write(f"**Experience:** {experience} years")
    st.write(f"**Position:** {position}")
    st.write(f"**Location:** {location}")
    st.write(f"**Tech Stack:** {', '.join(tech_stack)}")


# Interactive Question and Answer Section
st.write("### Technical Questions")
all_answers = []

if tech_stack:
    # BUG FIX: the original called generate_questions(chat, tech[:5]), which
    # truncated the tech *name* to its first 5 characters ("javascript" ->
    # "javas"). The intent was to cap the number of techs, so slice the list.
    for tech in tech_stack[:5]:
        tech = tech.strip()

        if tech:
            st.write(f"**Questions for {tech}:**")
            questions = generate_questions(chat, tech)
            for idx, question in enumerate(questions.split("\n\n")):
                # BUG FIX: the original f"{question.strip("\n")}" reused double
                # quotes inside an f-string — a SyntaxError before Python 3.12.
                st.write(question.strip("\n"))

                answer = st.text_area(f"Answer for Q{idx+1} ({tech})", key=f"answer_{tech}_{idx}")
                all_answers.append({"question": question, "answer": answer})
    # Final Submission Button
    if st.button("Submit All Answers"):
        st.write("### Thank You for Completing the Questions!")
        st.write("We appreciate your time and effort in answering the questions.")
        st.write("Our team will review your responses and get back to you shortly.")
        st.write("Warm regards,\nTalentScout Team")
else:
    st.error("Please provide a valid tech stack.")

# Exit Conversation
# if st.button("End Conversation"):
#     st.write("Thank you for using TalentScout! Best of luck to the candidates.")
#     st.stop()
components/__pycache__/manual_entry.cpython-312.pyc
ADDED
Binary file (4.2 kB). View file
|
|
components/__pycache__/resume_entry.cpython-312.pyc
ADDED
Binary file (5.96 kB). View file
|
|
components/manual_entry.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from src.manual_entry import ManualEntry
import streamlit as st
from generate_questions.questions_generator import QuestionGenerator
import google.generativeai as genai
import os
from src.feedback import feed_back
from src.greetings import end_conversation, home_greetings
import json

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
model = genai.GenerativeModel("gemini-pro")

chat = model.start_chat(history=[])
manual_entry = ManualEntry()
questions_generator = QuestionGenerator()


class EnterManually:
    """Drives the manual-entry flow: collect details, generate per-tech
    questions, capture answers, and save the transcript to JSON."""

    def manual_entry(self):
        """Render the manual-entry page and run the full Q&A flow.

        Returns:
            None. All output goes to the Streamlit UI; answers are written
            to questions_answers.json on final submission.
        """
        tech_stack = manual_entry.manual_entry_form()
        if tech_stack:
            st.session_state.tech_stack = tech_stack

        # BUG FIX: the original read st.session_state.tech_stack without
        # initializing it, raising AttributeError on the first rerun before
        # the form was submitted (resume_entry.py already guards this way).
        if "tech_stack" not in st.session_state:
            st.session_state.tech_stack = []
        if "questions" not in st.session_state:
            st.session_state.questions = {}
        if "answers" not in st.session_state:
            st.session_state.answers = {}

        if st.session_state.tech_stack:
            if st.button("Generate Questions"):
                for tech in st.session_state.tech_stack:
                    # BUG FIX: the original checked `tech.strip()` for membership
                    # but stored under the unstripped `tech`, so a padded name
                    # (" python") was regenerated on every click. Strip once and
                    # use the stripped name consistently.
                    tech = tech.strip()
                    if tech and tech not in st.session_state.questions:
                        questions = questions_generator.generate_questions(chat, tech)
                        st.session_state.questions[tech] = questions.split("\n")
                        st.session_state.answers[tech] = [""] * len(st.session_state.questions[tech])

            if st.session_state.questions:
                for tech, questions in st.session_state.questions.items():
                    st.write(f"### Technical Questions for {tech}")
                    for idx, question in enumerate(questions):
                        st.write(f"{idx + 1}. {question}")
                        st.session_state.answers[tech][idx] = st.text_area(
                            f"Answer for Q{idx + 1} ({tech})",
                            value=st.session_state.answers[tech][idx],
                            key=f"answer_{tech}_{idx}",
                        )

                if st.button("Submit Answers"):
                    greets = end_conversation()
                    st.write(greets)
                    output_data = []
                    for tech, questions in st.session_state.questions.items():
                        for idx, question in enumerate(questions):
                            output_data.append({
                                "tech": tech,
                                "question": question,
                                "answer": st.session_state.answers[tech][idx]
                            })

                    # Save as JSON file for later review/evaluation.
                    json_filename = "questions_answers.json"
                    with open(json_filename, "w") as json_file:
                        json.dump(output_data, json_file, indent=4)

                    feed_back()
|
66 |
+
|
components/resume_entry.py
ADDED
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
from src.resume_parser import ResumeParser
from generate_questions.questions_generator import QuestionGenerator
import google.generativeai as genai
import os
from src.greetings import end_conversation,start_greeting
import json
from src.feedback import feed_back

# Configure Gemini from the environment; the shared chat session below is
# handed to the question generator for every tech topic.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
model = genai.GenerativeModel("gemini-pro")

chat = model.start_chat(history=[])
resume_parser = ResumeParser()
questions_generator = QuestionGenerator()


class ResumeUploader:
    # Drives the resume-upload flow: parse the PDF, display candidate details,
    # generate per-tech questions, collect answers, and save them to JSON.
    def resume_upload(self):
        """Render the resume-upload page and run the full Q&A flow.

        Returns:
            None. All output goes to the Streamlit UI; answers are written
            to questions_answers.json on final submission.
        """
        st.title("TalentScout - Hiring Assistant Chatbot")
        start_greeting()
        st.sidebar.title("Candidate Details")

        # Session-state keys survive Streamlit reruns, so questions are only
        # generated once per tech and answers persist between interactions.
        if "tech_stack" not in st.session_state:
            st.session_state.tech_stack = []
        if "questions" not in st.session_state:
            st.session_state.questions = {}
        if "answers" not in st.session_state:
            st.session_state.answers = {}

        uploaded_file = st.sidebar.file_uploader("Upload Resume (PDF):", type=["pdf"])
        if uploaded_file:
            candidate_details = resume_parser.parse(uploaded_file)
            st.success("Resume uploaded and processed successfully!")
            st.write("### Candidate Information")
            st.write(f"**Name:** {candidate_details['name']}")
            st.write(f"**Email:** {candidate_details['email']}")
            st.write(f"**Phone:** {candidate_details['phone']}")
            st.write(f"**Experience:** {candidate_details['experience']}")
            st.write(f"**Position:** {candidate_details['position']}")
            st.write(f"**Location:** {candidate_details['location']}")
            st.write(f"**Tech Stack:** {', '.join(candidate_details['tech_stack'])}")
            # Only the top five extracted skills are used for questions.
            st.session_state.tech_stack = candidate_details["tech_stack"][:5]

        if st.session_state.tech_stack:
            st.write("**Based on your Top 5 skills ,we generate Technical Question , you need to answer the question .If you are ready please click Generate Questions button**")
            if st.button("Generate Questions"):
                for tech in st.session_state.tech_stack:
                    tech = tech.strip()
                    # Generate only once per tech; reruns reuse the cached list.
                    if tech not in st.session_state.questions:
                        questions = questions_generator.generate_questions(chat, tech)
                        st.session_state.questions[tech] = questions.split("\n")
                        st.session_state.answers[tech] = [""] * len(st.session_state.questions[tech])

            # Display questions and answers
            if st.session_state.questions:
                for tech, questions in st.session_state.questions.items():
                    st.write(f"### Technical Questions for {tech}")
                    for idx, question in enumerate(questions):
                        st.write(f"{idx + 1}. {question}")
                        st.session_state.answers[tech][idx] = st.text_area(
                            f"Answer for Q{idx + 1} ({tech})",
                            value=st.session_state.answers[tech][idx],
                            key=f"answer_{tech}_{idx}",)

                if st.button("Submit Answers"):
                    # Combine questions and answers into a JSON format
                    greets = end_conversation()
                    st.write(greets)
                    output_data = []
                    for tech, questions in st.session_state.questions.items():
                        for idx, question in enumerate(questions):
                            output_data.append({
                                "tech": tech,
                                "question": question,
                                "answer": st.session_state.answers[tech][idx]
                            })

                    # Save as JSON file
                    json_filename = "questions_answers.json"
                    with open(json_filename, "w") as json_file:
                        json.dump(output_data, json_file, indent=4)
                    feed_back()
|
generate_questions/__pycache__/questions_generator.cpython-312.pyc
ADDED
Binary file (1.68 kB). View file
|
|
generate_questions/questions_generator.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
load_dotenv()
import os
from prompts.prompts import PromptGenerator
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

model = genai.GenerativeModel("gemini-pro")

# Module-level demo chat session. QuestionGenerator receives its chat session
# as an argument, so this is only kept for backward compatibility with any
# code importing `chat` from this module.
chat = model.start_chat(
    history=[
        {"role": "user", "parts": "Hello"},
        {"role": "model", "parts": "Great to meet you. What would you like to know?"},
    ]
)


class QuestionGenerator:
    """Generates technical interview questions through a Gemini chat session."""

    def generate_questions(self, chat, tech):
        """Generates 3-5 technical questions for a specific tech item, starting from easy to advanced.

        Args:
            chat: An active Gemini chat session to send the prompt through.
            tech: The technology name to generate questions for.

        Returns:
            str: The generated questions, or a fallback message when the
            model returns an empty response.
        """
        # The dead commented-out inline prompt was removed; the prompt text
        # is owned by PromptGenerator so all callers share one source.
        prompt = PromptGenerator().get_prompts(tech)
        open_ended_response = chat.send_message(prompt, stream=True)
        open_ended_response.resolve()  # block until the streamed response completes
        return open_ended_response.text if open_ended_response.text else "Unable to generate open-ended questions."
|
main.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from components.resume_entry import ResumeUploader
import streamlit as st
from src.greetings import end_conversation, start_greeting
from components.manual_entry import EnterManually
from src.feedback import feed_back
from src.home_page import home_ui, about_me
from streamlit_option_menu import option_menu

# Sidebar navigation between the four app pages. (The duplicate
# `import streamlit as st` from the original was removed.)
with st.sidebar:
    selected = option_menu(
        menu_title="Main Menu",
        options=["Home", "Resume Uploader", "Manual Enter", "About Me"],
        icons=["house", "app-indicator", "bar-chart", "person-video"],
        menu_icon="cast",
        default_index=1,
    )

# The options are mutually exclusive, so use an elif chain instead of
# re-testing `selected` with independent ifs.
if selected == "Home":
    home_ui()
elif selected == "Resume Uploader":
    resume_uploader = ResumeUploader()
    greets = resume_uploader.resume_upload()
    if greets:
        st.write(greets)
elif selected == "Manual Enter":
    st.title("TalentScout - Hiring Assistant Chatbot")
    start_greeting()
    enter_manually = EnterManually()
    greets = enter_manually.manual_entry()
    if greets:
        st.write(greets)
elif selected == "About Me":
    about_me()
|
39 |
+
|
prompts/__pycache__/prompts.cpython-312.pyc
ADDED
Binary file (1.18 kB). View file
|
|
prompts/prompts.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import streamlit as st
|
3 |
+
class PromptGenerator:
    """Builds the LLM prompts used for question generation and answer grading."""

    def get_prompts(self, tech):
        """Return an interviewer prompt asking for 3-5 questions about *tech*.

        Args:
            tech: The technology name to interpolate into the prompt.

        Returns:
            str: The complete prompt text.
        """
        prompt = (
            f"You are a technical interviewer. Create 3 to 5 technical interview questions for the technology: {tech}. "
            "Don't mention level of question and just provide the questions directly."
        )
        return prompt

    def evaluate_prompt(self):
        """Return the rubric prompt used to grade candidate answers."""
        evaluation_prompt = """
        Evaluate each answer based on accuracy, relevance, and clarity. If an answer is provided, assess it; if not, mark it as "No answer provided." After evaluating all answers, return the percentage of valid answers (answers that are accurate, relevant, and clear).
        """
        return evaluation_prompt
|
18 |
+
|
19 |
+
|
requirements.txt
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiohappyeyeballs==2.4.4
|
2 |
+
aiohttp==3.11.11
|
3 |
+
aiosignal==1.3.2
|
4 |
+
altair==5.5.0
|
5 |
+
annotated-types==0.7.0
|
6 |
+
anyio==4.7.0
|
7 |
+
attrs==24.3.0
|
8 |
+
blinker==1.9.0
|
9 |
+
cachetools==5.5.0
|
10 |
+
certifi==2024.12.14
|
11 |
+
cffi==1.17.1
|
12 |
+
charset-normalizer==3.4.1
|
13 |
+
click==8.1.8
|
14 |
+
cryptography==44.0.0
|
15 |
+
distro==1.9.0
|
16 |
+
frozenlist==1.5.0
|
17 |
+
gitdb==4.0.12
|
18 |
+
GitPython==3.1.44
|
19 |
+
google-ai-generativelanguage==0.6.10
|
20 |
+
google-api-core==2.24.0
|
21 |
+
google-api-python-client==2.157.0
|
22 |
+
google-auth==2.37.0
|
23 |
+
google-auth-httplib2==0.2.0
|
24 |
+
google-generativeai==0.8.3
|
25 |
+
googleapis-common-protos==1.66.0
|
26 |
+
grpcio==1.68.1
|
27 |
+
grpcio-status==1.68.1
|
28 |
+
h11==0.14.0
|
29 |
+
httpcore==1.0.7
|
30 |
+
httplib2==0.22.0
|
31 |
+
httpx==0.28.1
|
32 |
+
idna==3.10
|
33 |
+
iniconfig==2.0.0
|
34 |
+
Jinja2==3.1.5
|
35 |
+
jiter==0.8.2
|
36 |
+
joblib==1.4.2
|
37 |
+
jsonschema==4.23.0
|
38 |
+
jsonschema-specifications==2024.10.1
|
39 |
+
# logging==0.4.9.6  — removed: the PyPI "logging" package is an obsolete Python-2-era backport that shadows the stdlib logging module
|
40 |
+
markdown-it-py==3.0.0
|
41 |
+
MarkupSafe==3.0.2
|
42 |
+
mdurl==0.1.2
|
43 |
+
multidict==6.1.0
|
44 |
+
narwhals==1.20.1
|
45 |
+
nltk==3.9.1
|
46 |
+
numpy==2.2.1
|
47 |
+
openai==1.58.1
|
48 |
+
packaging==24.2
|
49 |
+
pandas==2.2.3
|
50 |
+
pdfminer.six==20240706
|
51 |
+
pillow==11.1.0
|
52 |
+
pluggy==1.5.0
|
53 |
+
propcache==0.2.1
|
54 |
+
proto-plus==1.25.0
|
55 |
+
protobuf==5.29.2
|
56 |
+
pyarrow==18.1.0
|
57 |
+
pyasn1==0.6.1
|
58 |
+
pyasn1_modules==0.4.1
|
59 |
+
pycparser==2.22
|
60 |
+
pydantic==2.10.4
|
61 |
+
pydantic_core==2.27.2
|
62 |
+
pydeck==0.9.1
|
63 |
+
Pygments==2.18.0
|
64 |
+
pyparsing==3.2.1
|
65 |
+
pytest==8.3.4
|
66 |
+
python-dateutil==2.9.0.post0
|
67 |
+
python-dotenv==1.0.1
|
68 |
+
pytz==2024.2
|
69 |
+
referencing==0.35.1
|
70 |
+
regex==2024.11.6
|
71 |
+
requests==2.32.3
|
72 |
+
rich==13.9.4
|
73 |
+
rpds-py==0.22.3
|
74 |
+
rsa==4.9
|
75 |
+
six==1.17.0
|
76 |
+
smmap==5.0.2
|
77 |
+
sniffio==1.3.1
|
78 |
+
streamlit==1.41.1
|
79 |
+
streamlit-option-menu==0.4.0
|
80 |
+
tenacity==9.0.0
|
81 |
+
textblob==0.18.0.post0
|
82 |
+
toml==0.10.2
|
83 |
+
tornado==6.4.2
|
84 |
+
tqdm==4.67.1
|
85 |
+
typing_extensions==4.12.2
|
86 |
+
tzdata==2024.2
|
87 |
+
uritemplate==4.1.1
|
88 |
+
urllib3==2.3.0
|
89 |
+
watchdog==6.0.0
|
90 |
+
yarl==1.18.3
|
src/__pycache__/end_greets.cpython-312.pyc
ADDED
Binary file (883 Bytes). View file
|
|
src/__pycache__/evaluate.cpython-312.pyc
ADDED
Binary file (2.64 kB). View file
|
|
src/__pycache__/feedback.cpython-312.pyc
ADDED
Binary file (1.31 kB). View file
|
|
src/__pycache__/greetings.cpython-312.pyc
ADDED
Binary file (1.96 kB). View file
|
|
src/__pycache__/home_page.cpython-312.pyc
ADDED
Binary file (2.76 kB). View file
|
|
src/__pycache__/manual_entry.cpython-312.pyc
ADDED
Binary file (3.43 kB). View file
|
|
src/__pycache__/resume_parser.cpython-312.pyc
ADDED
Binary file (6.33 kB). View file
|
|
src/__pycache__/sentiment_analysis.cpython-312.pyc
ADDED
Binary file (811 Bytes). View file
|
|
src/evaluate.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import google.generativeai as genai
from prompts.prompts import PromptGenerator
from dotenv import load_dotenv
load_dotenv()  # BUG FIX: the original wrote `load_dotenv` without calling it
import os

api_key = os.getenv("GOOGLE_API_KEY")


class AnswerEvaluator:
    """Scores candidate question/answer pairs with the Gemini model."""

    def __init__(self, api_key):
        """Initialize with the Gemini API key."""
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel("gemini-pro")

    def evaluate_answers(self, all_answers):
        """Evaluates answers based on their relevance, accuracy, and clarity.

        Args:
            all_answers: list of {"question": str, "answer": str} dicts.

        Returns:
            str: The model's evaluation text, or an error description.
        """
        prompt = self._construct_prompt(all_answers)

        try:
            # Evaluate the answers using Gemini's model
            response = self.model.generate_content(prompt)
            return response.text.strip()
        except Exception as e:
            # Best-effort: surface the failure as a string rather than crash the UI.
            return f"Error in evaluation: {str(e)}"

    def _construct_prompt(self, all_answers):
        """Constructs a detailed prompt for the Gemini model."""
        evaluation_prompt = PromptGenerator().evaluate_prompt()

        for idx, item in enumerate(all_answers, 1):
            evaluation_prompt += f"\n{idx}. Question: {item['question']}\n Answer: {item['answer']}\n"

        evaluation_prompt += "\nProvide your detailed evaluation below:"
        return evaluation_prompt


if __name__ == "__main__":
    # BUG FIX: this demo previously executed (and hit the Gemini API) at
    # *import* time; guard it so importing AnswerEvaluator has no side effects.
    evaluator = AnswerEvaluator(api_key)

    # Example answers
    all_answers = [
        {"question": "What is machine learning?", "answer": "Machine learning is a subset of AI."},
        {"question": "Explain logistic regression.", "answer": "It is used to classify data points into classes."},
    ]

    # Evaluate the answers
    evaluation_result = evaluator.evaluate_answers(all_answers)
    print(evaluation_result)
|
src/feedback.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st

from textblob import TextBlob  # Install using: pip install textblob


def feed_back():
    """Render the feedback box; on submit, thank the user and show sentiment.

    Returns:
        str: the raw feedback text (may be empty if nothing was entered).
    """
    # Collect user feedback
    st.write("---")
    st.write("### Chatbot Feedback")
    feedback = st.text_area("Additional Feedback:")

    if st.button("Submit Feedback"):
        # Show thank you message after submitting feedback
        st.success("Thank you for your feedback!")

        # Perform sentiment analysis on the provided feedback
        predict_sentiment(feedback)

    # Optionally, save or store feedback data
    # feedback_data = {
    #     "rating": rating,
    #     "feedback": feedback,
    # }
    # st.write("Feedback Data:", feedback_data)

    return feedback


def predict_sentiment(feedback):
    """Classify *feedback* as Positive/Negative/Neutral using TextBlob polarity."""
    if feedback:  # Only perform sentiment analysis if feedback is provided
        # Analyze sentiment
        analysis = TextBlob(feedback)
        sentiment_score = analysis.sentiment.polarity

        # Predict sentiment: polarity > 0 positive, < 0 negative, == 0 neutral
        if sentiment_score > 0:
            sentiment = "Positive 😊"
        elif sentiment_score < 0:
            sentiment = "Negative 😞"
        else:
            sentiment = "Neutral 😐"

        # Display sentiment analysis result
        st.write(f"Sentiment Analysis Result: **{sentiment}**")
        # st.write(f"Sentiment Score: {sentiment_score}")

# Running the feedback function
# feed_back()
|
src/greetings.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
# (The original imported streamlit twice; the duplicate import was removed.)


def start_greeting():
    """Show the welcome banner and a short overview of the interview flow."""
    # Display a friendly greeting message to the candidate
    st.write("---")
    st.write("### Welcome to the TalentScout Hiring Assistant Chatbot! 🎉")

    # Greet the candidate and introduce the purpose of the chatbot
    st.write("Hello and thank you for applying! I'm here to help you through the interview process.")
    st.write("We'll be reviewing your skills and experience through a series of technical questions. Let's get started!")

    # Brief overview of the process
    st.write("You'll have the opportunity to upload your resume or enter your details manually. Then, I'll generate some technical questions based on your expertise.")

    # Encourage the candidate to proceed
    st.write("Please select your preferred method to proceed, and we'll take it from there!")


def end_conversation():
    """Draw a divider and return the closing thank-you message (markdown).

    Returns:
        str: Markdown text the caller is expected to render with st.write.
    """
    st.write("---")
    end_greetings = """
    ### Thank You for Your Responses!
    Thank you for your time and thoughtful answers. We appreciate your interest in the position!\n
    Our team will review your responses and contact you soon regarding the next steps in the hiring process.\n
    If you have any questions or need further assistance, feel free to reach out!\n
    Good luck and stay positive! 😊
    """
    return end_greetings


def home_greetings():
    """Placeholder greeting for the home page."""
    st.write("Home page")
|
src/home_page.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st

def home_ui():
    """Render the landing page: intro copy plus a placeholder banner image."""
    st.title("TalentScout - Hiring Assistant Chatbot 🤖")
    st.write("""
    Welcome to **TalentScout**, your AI-powered hiring assistant! 🎉
    This tool simplifies the hiring process by leveraging the power of AI to:
    - Parse resumes and extract candidate details.
    - Generate tailored technical questions based on a candidate's expertise.
    - Collect and save answers for structured analysis.

    Navigate using the sidebar to:
    - Upload a candidate's resume for automated parsing.
    - Enter candidate details manually for question generation.

    Let's revolutionize the hiring process together! 🚀
    """)
    st.image(
        "https://via.placeholder.com/800x400.png?text=Welcome+to+TalentScout",
        use_container_width=True,
    )


def about_me():
    """Render the author bio and contact links."""
    st.title("About Me 🙋‍♂️ ")
    st.write("""
    Hello! 👋 I'm **R. Sarath Kumar**, and I'm thrilled to have you here! I’m a dedicated and passionate professional in **Data Science** and **Machine Learning** 🤖.
    With a strong foundation in statistics, machine learning, and MLOps, I love transforming data into valuable insights and building predictive models that solve real-world problems. My work spans across multiple domains, and I’m always excited to explore new tools and techniques to make data-driven decisions more effective and impactful.

    Feel free to browse through my projects, where you’ll find some of the most exciting applications of AI and machine learning, and don't hesitate to reach out if you’d like to connect or discuss potential collaborations!
    """)

    st.subheader("Contact Information")

    st.write("🔗LinkedIn: [LinkedIn](https://www.linkedin.com/in/r-sarath-kumar-666084257)")
    st.write("🔗Github:[Github](https://www.github.com/sarathkumar1304)")

    st.write("📧 Email: [[email protected]](mailto:[email protected])")

    st.write("📞 Phone: 7780651312")
|
src/manual_entry.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st

# (The ~35-line commented-out duplicate `DataProcessor` class that preceded
# this was dead code superseded by ManualEntry and has been removed.)


class ManualEntry:
    """Collects candidate details through a Streamlit form and validates them."""

    def manual_entry_form(self):
        """Render the candidate form; on a fully-filled submission, echo the data.

        Returns:
            list[str] | None: the comma-split tech stack entries when the form
            was submitted with all fields present; otherwise None.
        """
        with st.form("manual_entry_form"):
            name = st.text_input("Full Name")
            email = st.text_input("Email Address")
            phone = st.text_input("Phone Number")
            experience = st.number_input("Years of Experience", min_value=0, step=1)
            position = st.text_input("Desired Position")
            location = st.text_input("Current Location")
            tech_stack = st.text_area("Tech Stack (comma-separated)").split(",")
            submit = st.form_submit_button("Submit")

        # Check if any field is empty
        if submit:
            # Collect *all* empty fields so the user is warned once, not
            # one field at a time.
            missing_fields = []
            if not name:
                missing_fields.append("Full Name")
            if not email:
                missing_fields.append("Email Address")
            if not phone:
                missing_fields.append("Phone Number")
            if experience == 0:
                missing_fields.append("Years of Experience")
            if not position:
                missing_fields.append("Desired Position")
            if not location:
                missing_fields.append("Current Location")
            # "".split(",") yields [""], so also reject whitespace-only entries.
            if not tech_stack or all(not tech.strip() for tech in tech_stack):
                missing_fields.append("Tech Stack")

            if missing_fields:
                # Show a message if any fields are missing
                st.warning(f"Please provide details for the following fields: {', '.join(missing_fields)}")
            else:
                self.display_submitted_data(name, email, phone, experience, position, location, tech_stack)
                # BUG FIX: return the tech stack only on a *valid* submission;
                # the original could hand incomplete data to the caller.
                return tech_stack

    def display_submitted_data(self, name, email, phone, experience, position, location, tech_stack):
        """Echo the submitted candidate details back to the UI."""
        st.success("Data Submitted Successfully!")
        st.write(f"**Name:** {name}")
        st.write(f"**Email:** {email}")
        st.write(f"**Phone:** {phone}")
        st.write(f"**Experience:** {experience} years")
        st.write(f"**Position:** {position}")
        st.write(f"**Location:** {location}")
        st.write(f"**Tech Stack:** {', '.join(tech_stack)}")
|
src/resume_parser.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pdfminer.high_level import extract_text
|
2 |
+
import re
|
3 |
+
from datetime import datetime
|
4 |
+
|
5 |
+
class ResumeParser:
|
6 |
+
def parse(self, resume_file):
|
7 |
+
"""Extracts text from a PDF resume and processes candidate information."""
|
8 |
+
text = extract_text(resume_file)
|
9 |
+
return self.extract_candidate_info(text)
|
10 |
+
|
11 |
+
def extract_candidate_info(self, text):
|
12 |
+
"""Extracts candidate details from the parsed resume text."""
|
13 |
+
return {
|
14 |
+
"name": self.extract_name(text),
|
15 |
+
"email": self.extract_email(text),
|
16 |
+
"phone": self.extract_phone(text),
|
17 |
+
"experience": self.extract_experience(text),
|
18 |
+
"position": self.extract_position(text),
|
19 |
+
"location": self.extract_location(text),
|
20 |
+
"tech_stack": self.extract_tech_stack(text),
|
21 |
+
}
|
22 |
+
|
23 |
+
@staticmethod
|
24 |
+
def extract_name(text):
|
25 |
+
"""Extracts the candidate's name from the first line or common patterns."""
|
26 |
+
# Split text into lines and take the first non-empty line
|
27 |
+
lines = text.splitlines()
|
28 |
+
for line in lines:
|
29 |
+
line = line.strip()
|
30 |
+
if line: # Ignore empty lines
|
31 |
+
# Check for a valid name format (e.g., avoiding single words like "Resume")
|
32 |
+
if len(line.split()) >= 2: # Name should have at least two words
|
33 |
+
return line
|
34 |
+
break
|
35 |
+
return "Name not found"
|
36 |
+
|
37 |
+
@staticmethod
|
38 |
+
def extract_email(text):
|
39 |
+
"""Extracts the candidate's email address."""
|
40 |
+
match = re.search(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}", text)
|
41 |
+
return match.group(0) if match else "Email not found"
|
42 |
+
|
43 |
+
@staticmethod
|
44 |
+
def extract_phone(text):
|
45 |
+
"""Extracts the candidate's phone number."""
|
46 |
+
match = re.search(r"\+?\d{10,13}", text)
|
47 |
+
return match.group(0) if match else "Phone number not found"
|
48 |
+
|
49 |
+
@staticmethod
|
50 |
+
def extract_position(text):
|
51 |
+
"""Extracts the candidate's position (e.g., Job Title)."""
|
52 |
+
match = re.search(r"(?i)experience(?:\:|\s+)([^\n]+)", text)
|
53 |
+
return match.group(1).strip() if match else "Position not found"
|
54 |
+
|
55 |
+
@staticmethod
|
56 |
+
def extract_location(text):
|
57 |
+
"""Extracts the candidate's location."""
|
58 |
+
# Regex to match patterns like 'Location: Bengaluru, Karnataka' or standalone 'Bengaluru, Karnataka'
|
59 |
+
match = re.search(r"(?i)location(?:\:|\s+)([^\n]+)|\b([A-Za-z\s]+,\s*[A-Za-z\s]+)\b", text)
|
60 |
+
if match:
|
61 |
+
# Group 1 matches 'Location: <value>' and Group 2 matches '<City>, <State>'
|
62 |
+
location = match.group(1) or match.group(2)
|
63 |
+
return location.strip()
|
64 |
+
return "Location not found"
|
65 |
+
|
66 |
+
@staticmethod
|
67 |
+
def extract_tech_stack(text):
|
68 |
+
"""Extracts technical skills dynamically from the skills section."""
|
69 |
+
# Find the 'Skills' or 'Technical Skills' section in the text
|
70 |
+
match = re.search(r"(?i)(skills|technical skills)(?:\:|\s+)([^\n]+)", text)
|
71 |
+
if match:
|
72 |
+
tech_line = match.group(2).strip()
|
73 |
+
# Split the skills based on common delimiters (comma, semicolon, etc.)
|
74 |
+
skills = re.split(r"[,\;\|]", tech_line)
|
75 |
+
# Strip whitespace and return unique skills
|
76 |
+
return [skill.strip() for skill in skills if skill.strip()]
|
77 |
+
return ["No tech stack found"]
|
78 |
+
@staticmethod
|
79 |
+
def extract_experience(text):
|
80 |
+
"""Extracts and calculates the candidate's total experience based on date ranges."""
|
81 |
+
# Updated regex pattern to match abbreviated and full month names along with 'Present'
|
82 |
+
date_pattern = r"(\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?) \d{4})"
|
83 |
+
regex = rf"{date_pattern}\s*-\s*({date_pattern}|Present)"
|
84 |
+
|
85 |
+
matches = re.findall(regex, text, re.IGNORECASE)
|
86 |
+
|
87 |
+
total_months = 0
|
88 |
+
for match in matches:
|
89 |
+
start_date_str = match[0]
|
90 |
+
end_date_str = match[1]
|
91 |
+
|
92 |
+
start_date = ResumeParser.parse_date(start_date_str)
|
93 |
+
end_date = datetime.now() if "Present" in end_date_str else ResumeParser.parse_date(end_date_str)
|
94 |
+
|
95 |
+
if start_date and end_date:
|
96 |
+
delta = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
|
97 |
+
total_months += delta
|
98 |
+
|
99 |
+
years = total_months // 12
|
100 |
+
months = total_months % 12
|
101 |
+
|
102 |
+
return f"{years} years, {months} months" if total_months > 0 else "Experience not found"
|
103 |
+
|
104 |
+
@staticmethod
|
105 |
+
def parse_date(date_str):
|
106 |
+
"""Parses a date string like 'January 2015' or 'Feb 2024' into a datetime object."""
|
107 |
+
try:
|
108 |
+
return datetime.strptime(date_str, "%b %Y") # Abbreviated month
|
109 |
+
except ValueError:
|
110 |
+
try:
|
111 |
+
return datetime.strptime(date_str, "%B %Y") # Full month
|
112 |
+
except ValueError:
|
113 |
+
return None
|
114 |
+
|
115 |
+
|